Diffstat (limited to 'erts')
-rw-r--r--  erts/configure.in | 4
-rw-r--r--  erts/doc/src/erl_nif.xml | 284
-rw-r--r--  erts/doc/src/erlang.xml | 37
-rw-r--r--  erts/doc/src/notes.xml | 167
-rw-r--r--  erts/doc/src/zlib.xml | 227
-rw-r--r--  erts/emulator/Makefile.in | 10
-rw-r--r--  erts/emulator/beam/atom.c | 5
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 7
-rw-r--r--  erts/emulator/beam/beam_bp.c | 3
-rw-r--r--  erts/emulator/beam/beam_emu.c | 2
-rw-r--r--  erts/emulator/beam/bif.c | 56
-rw-r--r--  erts/emulator/beam/bif.tab | 11
-rw-r--r--  erts/emulator/beam/big.c | 11
-rw-r--r--  erts/emulator/beam/code_ix.c | 3
-rw-r--r--  erts/emulator/beam/dist.c | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 11
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 7
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 57
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 5
-rw-r--r--  erts/emulator/beam/erl_async.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 492
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 6
-rw-r--r--  erts/emulator/beam/erl_bits.c | 31
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 3
-rw-r--r--  erts/emulator/beam/erl_db.c | 63
-rw-r--r--  erts/emulator/beam/erl_db.h | 5
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 24
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 4
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 5
-rw-r--r--  erts/emulator/beam/erl_driver.h | 19
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 25
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 29
-rw-r--r--  erts/emulator/beam/erl_fun.c | 3
-rw-r--r--  erts/emulator/beam/erl_gc.c | 3
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 37
-rw-r--r--  erts/emulator/beam/erl_init.c | 11
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 6
-rw-r--r--  erts/emulator/beam/erl_io_queue.c | 1238
-rw-r--r--  erts/emulator/beam/erl_io_queue.h | 201
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 287
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 50
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 961
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 998
-rw-r--r--  erts/emulator/beam/erl_lock_flags.c | 59
-rw-r--r--  erts/emulator/beam/erl_lock_flags.h | 78
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 6
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 6
-rw-r--r--  erts/emulator/beam/erl_nif.c | 391
-rw-r--r--  erts/emulator/beam/erl_nif.h | 23
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 25
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 44
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 4
-rw-r--r--  erts/emulator/beam/erl_port.h | 22
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 37
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 8
-rw-r--r--  erts/emulator/beam/erl_process.c | 51
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 209
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 180
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 3
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_smp.h | 133
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 33
-rw-r--r--  erts/emulator/beam/erl_threads.h | 406
-rw-r--r--  erts/emulator/beam/erl_time.h | 7
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 164
-rw-r--r--  erts/emulator/beam/erl_trace.c | 10
-rw-r--r--  erts/emulator/beam/erl_utils.h | 1
-rw-r--r--  erts/emulator/beam/export.c | 3
-rw-r--r--  erts/emulator/beam/global.h | 6
-rw-r--r--  erts/emulator/beam/io.c | 1067
-rw-r--r--  erts/emulator/beam/module.c | 3
-rw-r--r--  erts/emulator/beam/ops.tab | 8
-rw-r--r--  erts/emulator/beam/register.c | 3
-rw-r--r--  erts/emulator/beam/safe_hash.c | 23
-rw-r--r--  erts/emulator/beam/safe_hash.h | 7
-rw-r--r--  erts/emulator/beam/sys.h | 4
-rw-r--r--  erts/emulator/beam/utils.c | 90
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 3
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 51
-rw-r--r--  erts/emulator/drivers/common/zlib_drv.c | 792
-rw-r--r--  erts/emulator/drivers/unix/ttsl_drv.c | 108
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 3
-rw-r--r--  erts/emulator/nifs/common/zlib_nif.c | 1019
-rw-r--r--  erts/emulator/pcre/local_config.h | 2
-rw-r--r--  erts/emulator/pcre/pcre-8.40.tar.bz2 | bin 1560119 -> 0 bytes
-rw-r--r--  erts/emulator/pcre/pcre-8.41.tar.bz2 | bin 0 -> 1561874 bytes
-rw-r--r--  erts/emulator/pcre/pcre.h | 4
-rw-r--r--  erts/emulator/pcre/pcre_compile.c | 16
-rw-r--r--  erts/emulator/pcre/pcre_dfa_exec.c | 4
-rw-r--r--  erts/emulator/pcre/pcre_exec.c | 2
-rw-r--r--  erts/emulator/pcre/pcre_internal.h | 11
-rw-r--r--  erts/emulator/pcre/pcre_jit_compile.c | 958
-rw-r--r--  erts/emulator/pcre/pcre_tables.c | 4
-rw-r--r--  erts/emulator/pcre/pcre_ucd.c | 14
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 36
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h | 9
-rw-r--r--  erts/emulator/sys/common/erl_mmap.c | 9
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 6
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 38
-rw-r--r--  erts/emulator/sys/common/erl_poll.h | 4
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 4
-rw-r--r--  erts/emulator/sys/unix/sys.c | 15
-rw-r--r--  erts/emulator/sys/unix/sys_time.c | 4
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 10
-rw-r--r--  erts/emulator/sys/win32/sys.c | 15
-rw-r--r--  erts/emulator/sys/win32/sys_env.c | 3
-rw-r--r--  erts/emulator/sys/win32/sys_time.c | 4
-rw-r--r--  erts/emulator/test/Makefile | 2
-rw-r--r--  erts/emulator/test/bif_SUITE.erl | 31
-rw-r--r--  erts/emulator/test/big_SUITE.erl | 53
-rw-r--r--  erts/emulator/test/big_SUITE_data/borders.dat | 35
-rw-r--r--  erts/emulator/test/binary_SUITE.erl | 3
-rw-r--r--  erts/emulator/test/bs_construct_SUITE.erl | 24
-rw-r--r--  erts/emulator/test/fun_SUITE.erl | 14
-rw-r--r--  erts/emulator/test/iovec_SUITE.erl | 168
-rw-r--r--  erts/emulator/test/lcnt_SUITE.erl | 156
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 194
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 241
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 44
-rw-r--r--  erts/emulator/test/port_SUITE_data/echo_drv.c | 31
-rw-r--r--  erts/emulator/test/register_SUITE.erl | 9
-rw-r--r--  erts/emulator/test/statistics_SUITE.erl | 34
-rw-r--r--  erts/etc/common/Makefile.in | 7
-rw-r--r--  erts/etc/common/escript.c | 21
-rw-r--r--  erts/preloaded/ebin/erlang.beam | bin 106204 -> 106344 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam | bin 50344 -> 50380 bytes
-rw-r--r--  erts/preloaded/ebin/zlib.beam | bin 14316 -> 19120 bytes
-rw-r--r--  erts/preloaded/src/erlang.erl | 12
-rw-r--r--  erts/preloaded/src/init.erl | 2
-rw-r--r--  erts/preloaded/src/zlib.erl | 703
-rw-r--r--  erts/test/system_smoke.spec | 1
-rw-r--r--  erts/vsn.mk | 2
133 files changed, 9049 insertions, 4422 deletions
diff --git a/erts/configure.in b/erts/configure.in
index 830e3d7776..913315e402 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -1784,6 +1784,8 @@ AC_CHECK_HEADER(sys/resource.h,
[#include <sys/resource.h>])],
[],[])
+AC_CHECK_FUNCS([getrusage])
+
dnl Check if we have kernel poll support
have_kernel_poll=no
AC_CHECK_HEADER(sys/event.h, have_kernel_poll=kqueue)
@@ -3855,7 +3857,7 @@ case $host_os in
darwin*)
# Mach-O linker: a shared lib and a loadable
# object file is not the same thing.
- DED_LDFLAGS="-bundle -flat_namespace -undefined suppress"
+ DED_LDFLAGS="-bundle -bundle_loader ${ERL_TOP}/bin/$host/beam.smp"
case $ARCH in
amd64)
DED_LDFLAGS="-m64 $DED_LDFLAGS"
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index 5a69bed34c..419e41693e 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -206,7 +206,7 @@ ok
<seealso marker="#enif_make_resource">
<c>enif_make_resource</c></seealso>.
The term returned by <c>enif_make_resource</c> is opaque in nature.
- It can be stored and passed between processes on the same node, but
+ It can be stored and passed between processes, but
the only real end usage is to pass it back as an argument to a NIF.
The NIF can then call <seealso marker="#enif_get_resource">
<c>enif_get_resource</c></seealso> and get back a pointer to the
@@ -344,6 +344,81 @@ return term;</code>
<c>enif_convert_time_unit()</c></seealso></item>
</list>
</item>
+ <tag><marker id="enif_ioq"/>I/O Queues</tag>
+ <item>
+        <p>The Erlang NIF library contains functions for easily working
+          with I/O vectors as used by the Unix system call <c>writev</c>.
+          The I/O queue is not thread safe, so some other synchronization
+          mechanism has to be used; a sketch of one such mechanism follows
+          the example below.</p>
+ <list type="bulleted">
+ <item><seealso marker="#SysIOVec">
+ <c>SysIOVec</c></seealso></item>
+ <item><seealso marker="#ErlNifIOVec">
+ <c>ErlNifIOVec</c></seealso></item>
+ <item><seealso marker="#enif_ioq_create">
+ <c>enif_ioq_create()</c></seealso></item>
+ <item><seealso marker="#enif_ioq_destroy">
+ <c>enif_ioq_destroy()</c></seealso></item>
+ <item><seealso marker="#enif_ioq_enq_binary">
+ <c>enif_ioq_enq_binary()</c></seealso></item>
+ <item><seealso marker="#enif_ioq_enqv">
+ <c>enif_ioq_enqv()</c></seealso></item>
+ <item><seealso marker="#enif_ioq_deq">
+ <c>enif_ioq_deq()</c></seealso></item>
+ <item><seealso marker="#enif_ioq_peek">
+ <c>enif_ioq_peek()</c></seealso></item>
+ <item><seealso marker="#enif_inspect_iovec">
+ <c>enif_inspect_iovec()</c></seealso></item>
+ <item><seealso marker="#enif_free_iovec">
+ <c>enif_free_iovec()</c></seealso></item>
+ </list>
+ <p>Typical usage when writing to a file descriptor looks like this:</p>
+ <code type="none"><![CDATA[
+int writeiovec(ErlNifEnv *env, ERL_NIF_TERM term, ERL_NIF_TERM *tail,
+ ErlNifIOQueue *q, int fd) {
+
+ ErlNifIOVec vec, *iovec = &vec;
+ SysIOVec *sysiovec;
+ int saved_errno;
+ int iovcnt, n;
+
+ if (!enif_inspect_iovec(env, 64, term, tail, &iovec))
+ return -2;
+
+ if (enif_ioq_size(q) > 0) {
+ /* If the I/O queue contains data we enqueue the iovec and
+ then peek the data to write out of the queue. */
+ if (!enif_ioq_enqv(q, iovec, 0))
+ return -3;
+
+ sysiovec = enif_ioq_peek(q, &iovcnt);
+ } else {
+ /* If the I/O queue is empty we skip the trip through it. */
+ iovcnt = iovec->iovcnt;
+ sysiovec = iovec->iov;
+ }
+
+ /* Attempt to write the data */
+ n = writev(fd, sysiovec, iovcnt);
+ saved_errno = errno;
+
+ if (enif_ioq_size(q) == 0) {
+ /* If the I/O queue was initially empty we enqueue any
+ remaining data into the queue for writing later. */
+ if (n >= 0 && !enif_ioq_enqv(q, iovec, n))
+ return -3;
+ } else {
+ /* Dequeue any data that was written from the queue. */
+ if (n > 0 && !enif_ioq_deq(q, n, NULL))
+ return -4;
+ }
+
+    /* Return n, which is either the number of bytes written or -1 if
+       an error happened. */
+ errno = saved_errno;
+ return n;
+}]]></code>
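A minimal sketch of one way to provide that synchronization, assuming a hypothetical shared_queue_t wrapper (the wrapper and its function names are not part of the erl_nif API, and error handling is omitted for brevity): every queue operation is guarded by an ErlNifMutex.

<code type="none"><![CDATA[
#include <erl_nif.h>

typedef struct {
    ErlNifMutex   *lock;   /* serializes all access to the queue */
    ErlNifIOQueue *queue;
} shared_queue_t;

static shared_queue_t *shared_queue_create(void)
{
    shared_queue_t *sq = enif_alloc(sizeof(shared_queue_t));
    sq->lock  = enif_mutex_create("shared_ioq_lock");
    sq->queue = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
    return sq;
}

static int shared_queue_enqv(shared_queue_t *sq, ErlNifIOVec *iovec)
{
    int ok;
    enif_mutex_lock(sq->lock);
    ok = enif_ioq_enqv(sq->queue, iovec, 0);
    enif_mutex_unlock(sq->lock);
    return ok;
}

static void shared_queue_destroy(shared_queue_t *sq)
{
    enif_ioq_destroy(sq->queue);
    enif_mutex_destroy(sq->lock);
    enif_free(sq);
}
]]></code>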
+ </item>
<tag><marker id="lengthy_work"/>Long-running NIFs</tag>
<item>
<p>As mentioned in the <seealso marker="#WARNING">warning</seealso> text
@@ -837,6 +912,36 @@ typedef enum {
</item>
</taglist>
</item>
+ <tag><marker id="SysIOVec"/><c>SysIOVec</c></tag>
+ <item>
+ <p>A system I/O vector, as used by <c>writev</c> on
+ Unix and <c>WSASend</c> on Win32. It is used in
+ <c>ErlNifIOVec</c> and by
+ <seealso marker="#enif_ioq_peek"><c>enif_ioq_peek</c></seealso>.</p>
+ </item>
+ <tag><marker id="ErlNifIOVec"/><c>ErlNifIOVec</c></tag>
+ <item>
+ <code type="none">
+typedef struct {
+ int iovcnt;
+ size_t size;
+ SysIOVec* iov;
+} ErlNifIOVec;</code>
+ <p>An I/O vector containing <c>iovcnt</c> <c>SysIOVec</c>s
+ pointing to the data. It is used by
+ <seealso marker="#enif_inspect_iovec">
+ <c>enif_inspect_iovec</c></seealso> and
+ <seealso marker="#enif_ioq_enqv">
+ <c>enif_ioq_enqv</c></seealso>.</p>
+ </item>
+ <tag><marker id="ErlNifIOQueueOpts"/><c>ErlNifIOQueueOpts</c></tag>
+ <item>
+        Options to configure an <c>ErlNifIOQueue</c>.
+ <taglist>
+ <tag>ERL_NIF_IOQ_NORMAL</tag>
+ <item><p>Create a normal I/O Queue</p></item>
+ </taglist>
+ </item>
</taglist>
</section>
@@ -1143,6 +1248,31 @@ typedef enum {
</func>
<func>
+ <name><ret>void</ret>
+      <nametext>enif_free_iovec(ErlNifIOVec* iov)</nametext></name>
+      <fsummary>Free an ErlNifIOVec</fsummary>
+      <desc>
+        <p>Frees an I/O vector returned from
+ <seealso marker="#enif_inspect_iovec">
+ <c>enif_inspect_iovec</c></seealso>.
+ This is needed only if a <c>NULL</c> environment is passed to
+ <seealso marker="#enif_inspect_iovec">
+ <c>enif_inspect_iovec</c></seealso>.</p>
+ <code type="none"><![CDATA[
+ErlNifIOVec *iovec = NULL;
+size_t max_elements = 128;
+ERL_NIF_TERM tail;
+if (!enif_inspect_iovec(NULL, max_elements, term, &tail, &iovec))
+ return 0;
+
+// Do things with the iovec
+
+/* Free the iovec, possibly in another thread or NIF function call */
+enif_free_iovec(iovec);]]></code>
+ </desc>
+ </func>
+
+ <func>
<name><ret>int</ret><nametext>enif_get_atom(ErlNifEnv* env, ERL_NIF_TERM
term, char* buf, unsigned size, ErlNifCharEncoding encode)</nametext>
</name>
@@ -1449,6 +1579,127 @@ typedef enum {
</func>
<func>
+ <name><ret>int</ret><nametext>enif_inspect_iovec(ErlNifEnv*
+ env, size_t max_elements, ERL_NIF_TERM iovec_term, ERL_NIF_TERM* tail,
+ ErlNifIOVec** iovec)</nametext></name>
+ <fsummary>Inspect a list of binaries as an ErlNifIOVec.</fsummary>
+ <desc>
+ <p>Fills <c>iovec</c> with the list of binaries provided in
+ <c>iovec_term</c>. The number of elements handled in the call is
+ limited to <c>max_elements</c>, and <c>tail</c> is set to the
+ remainder of the list. Note that the output may be longer than
+ <c>max_elements</c> on some platforms.
+ </p>
+ <p>To create a list of binaries from an arbitrary iolist, use
+ <seealso marker="erts:erlang#iolist_to_iovec/1">
+ <c>erlang:iolist_to_iovec/1</c></seealso>.</p>
+ <p>When calling this function, <c>iovec</c> should contain a pointer to
+        <c>NULL</c> or to an ErlNifIOVec structure that should be used if
+        possible, for example:
+ </p>
+ <code type="none">
+/* Don't use a pre-allocated structure */
+ErlNifIOVec *iovec = NULL;
+enif_inspect_iovec(env, max_elements, term, &amp;tail, &amp;iovec);
+
+/* Use a stack-allocated vector as an optimization for vectors with few elements */
+ErlNifIOVec vec, *iovec = &amp;vec;
+enif_inspect_iovec(env, max_elements, term, &amp;tail, &amp;iovec);
+</code>
+      <p>The contents of the <c>iovec</c> are valid until the called NIF
+        function returns. If the <c>iovec</c> should be valid after the NIF
+        call returns, it is possible to call this function with a
+        <c>NULL</c> environment. If no environment is given, the <c>iovec</c>
+        owns the data in the vector and it has to be explicitly freed using
+ <seealso marker="#enif_free_iovec"><c>enif_free_iovec</c>
+ </seealso>.</p>
+      <p>Returns <c>true</c> on success, or <c>false</c> if <c>iovec_term</c>
+        is not an iovec.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name><ret>ErlNifIOQueue *</ret>
+ <nametext>enif_ioq_create(ErlNifIOQueueOpts opts)</nametext></name>
+ <fsummary>Create a new IO Queue</fsummary>
+ <desc>
+ <p>Create a new I/O Queue that can be used to store data.
+ <c>opts</c> has to be set to <c>ERL_NIF_IOQ_NORMAL</c>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name><ret>void</ret>
+ <nametext>enif_ioq_destroy(ErlNifIOQueue *q)</nametext></name>
+      <fsummary>Destroy an IO Queue and free its contents</fsummary>
+      <desc>
+        <p>Destroys the I/O queue and frees all of its contents.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name><ret>int</ret>
+ <nametext>enif_ioq_deq(ErlNifIOQueue *q, size_t count, size_t *size)</nametext></name>
+ <fsummary>Dequeue count bytes from the IO Queue</fsummary>
+ <desc>
+ <p>Dequeue <c>count</c> bytes from the I/O queue.
+ If <c>size</c> is not <c>NULL</c>, the new size of the queue
+ is placed there.</p>
+        <p>Returns <c>true</c> on success, or <c>false</c> if the I/O queue does
+        not contain <c>count</c> bytes. On failure the queue is left unaltered.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name><ret>int</ret>
+ <nametext>enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip)</nametext></name>
+ <fsummary>Enqueue the binary into the IO Queue</fsummary>
+ <desc>
+        <p>Enqueues <c>bin</c> into <c>q</c>, skipping the first <c>skip</c> bytes.</p>
+        <p>Returns <c>true</c> on success, or <c>false</c> if <c>skip</c> is greater
+        than the size of <c>bin</c>. Ownership of the binary data is transferred
+        to the queue: <c>bin</c> is to be considered read-only for the rest of the NIF
+        call and as released after that.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name><ret>int</ret>
+ <nametext>enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iovec, size_t skip)</nametext></name>
+ <fsummary>Enqueue the iovec into the IO Queue</fsummary>
+ <desc>
+        <p>Enqueues the <c>iovec</c> into <c>q</c>, skipping the first <c>skip</c> bytes.</p>
+ <p>Returns <c>true</c> on success, or <c>false</c> if <c>skip</c> is greater
+ than the size of <c>iovec</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name><ret>SysIOVec *</ret>
+ <nametext>enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)</nametext></name>
+ <fsummary>Peek inside the IO Queue</fsummary>
+ <desc>
+        <p>Get the I/O queue as a pointer to an array of <c>SysIOVec</c>s.
+        The number of elements is returned in <c>iovlen</c>.
+ This is the only way to get data out of the queue.</p>
+        <p>Nothing is removed from the queue by this function; that must be done
+ with <seealso marker="#enif_ioq_deq"><c>enif_ioq_deq</c></seealso>.</p>
+ <p>The returned array is suitable to use with the Unix system
+ call <c>writev</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name><ret>size_t</ret>
+ <nametext>enif_ioq_size(ErlNifIOQueue *q)</nametext></name>
+ <fsummary>Get the current size of the IO Queue</fsummary>
+ <desc>
+ <p>Get the size of <c>q</c>.</p>
+ </desc>
+ </func>
+
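Taken together, the queue functions above form a small lifecycle: create a queue, enqueue data, peek at it for writing, dequeue what has been consumed, and destroy the queue. The following is a minimal sketch of that round trip; the ioq_roundtrip helper is illustrative only (not from the documentation) and error handling is kept deliberately thin.

<code type="none"><![CDATA[
#include <string.h>
#include <erl_nif.h>

static int ioq_roundtrip(void)
{
    ErlNifIOQueue *q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
    ErlNifBinary bin;
    SysIOVec *iov;
    int iovcnt;
    size_t remaining;

    /* Allocate a binary; on a successful enqueue its data is owned by
       the queue and bin must not be used or released afterwards. */
    if (!enif_alloc_binary(5, &bin)) {
        enif_ioq_destroy(q);
        return 0;
    }
    memcpy(bin.data, "hello", 5);

    if (!enif_ioq_enq_binary(q, &bin, 0)) {
        enif_release_binary(&bin);
        enif_ioq_destroy(q);
        return 0;
    }

    /* Peek exposes the queued data as an array of SysIOVec, e.g. for
       writev(); nothing is removed until enif_ioq_deq() is called. */
    iov = enif_ioq_peek(q, &iovcnt);
    (void)iov;

    /* Pretend everything was written and drop it from the queue;
       remaining becomes 0. */
    enif_ioq_deq(q, enif_ioq_size(q), &remaining);

    enif_ioq_destroy(q);
    return 1;
}
]]></code>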
+ <func>
<name><ret>int</ret>
<nametext>enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term)</nametext>
</name>
@@ -1952,10 +2203,33 @@ typedef enum {
details, see the <seealso marker="#enif_resource_example">example of
creating and returning a resource object</seealso> in the User's
Guide.</p>
- <p>Notice that the only defined behavior of using a resource term in
- an Erlang program is to store it and send it between processes on the
- same node. Other operations, such as matching or
- <c>term_to_binary</c>, have unpredictable (but harmless) results.</p>
+ <note>
+ <p>Since ERTS 9.0 (OTP-20.0), resource terms have a defined behavior
+ when compared and serialized through <c>term_to_binary</c> or passed
+ between nodes.</p>
+ <list type="bulleted">
+ <item>
+ <p>Two resource terms will compare equal iff they
+ would yield the same resource object pointer when passed to
+ <seealso marker="#enif_get_resource"><c>enif_get_resource</c></seealso>.</p>
+ </item>
+ <item>
+          <p>A resource term can be serialized with <c>term_to_binary</c> and later
+ be fully recreated if the resource object is still alive when
+ <c>binary_to_term</c> is called. A <em>stale</em> resource term will be
+ returned from <c>binary_to_term</c> if the resource object has
+ been deallocated. <seealso marker="#enif_get_resource"><c>enif_get_resource</c></seealso>
+ will return false for stale resource terms.</p>
+ <p>The same principles of serialization apply when passing
+ resource terms in messages to remote nodes and back again. A
+ resource term will act stale on all nodes except the node where
+ its resource object is still alive in memory.</p>
+ </item>
+ </list>
+        <p>Before ERTS 9.0 (OTP-20.0), all resource terms
+        compared equal to each other and to empty binaries (<c>&lt;&lt;&gt;&gt;</c>).
+ If serialized, they would be recreated as plain empty binaries.</p>
+ </note>
</desc>
</func>
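In practice the note above makes enif_get_resource the authoritative liveness check for any resource term a NIF receives, whether it arrived directly, through binary_to_term, or from a remote node. A minimal sketch follows; my_resource_type and use_resource are assumed names for a resource type opened elsewhere with enif_open_resource_type.

<code type="none"><![CDATA[
#include <erl_nif.h>

/* Assumed to be opened in the NIF load() callback. */
extern ErlNifResourceType *my_resource_type;

static ERL_NIF_TERM use_resource(ErlNifEnv *env, int argc,
                                 const ERL_NIF_TERM argv[])
{
    void *obj;

    /* enif_get_resource() returns false both for terms that never were
       resources and for stale resource terms whose object is gone. */
    if (argc != 1 ||
        !enif_get_resource(env, argv[0], my_resource_type, &obj))
        return enif_make_badarg(env);

    /* obj points to a live resource object here. */
    return enif_make_atom(env, "ok");
}
]]></code>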
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 687ff38cbf..5afac46d21 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -60,6 +60,14 @@
</desc>
</datatype>
<datatype>
+ <name>iovec()</name>
+ <desc>
+      <p>A list of binaries. This datatype is useful
+        in combination with <seealso marker="erl_nif#enif_inspect_iovec">
+ <c>enif_inspect_iovec</c></seealso>.</p>
+ </desc>
+ </datatype>
+ <datatype>
<name name="message_queue_data"></name>
<desc>
<p>See <seealso marker="#process_flag_message_queue_data">
@@ -1943,23 +1951,26 @@ os_prompt%</pre>
<item>The runtime system exits with integer value
<c><anno>Status</anno></c>
as status code to the calling environment (OS).
+ <note>
+ <p>On many platforms, the OS supports only status
+ codes 0-255. A too large status code is truncated by clearing
+ the high bits.</p>
+ </note>
</item>
<tag>string()</tag>
<item>An Erlang crash dump is produced with <c><anno>Status</anno></c>
as slogan. Then the runtime system exits with status code <c>1</c>.
- Note that only code points in the range 0-255 may be used
- and the string will be truncated if longer than 200 characters.
+ The string will be truncated if longer than 200 characters.
+ <note>
+        <p>Before ERTS 9.1 (OTP-20.1), only code points in the range 0-255
+        were accepted in the string. Now any Unicode string is valid.</p>
+ </note>
</item>
<tag><c>abort</c></tag>
<item>The runtime system aborts producing a core dump, if that is
enabled in the OS.
</item>
</taglist>
- <note>
- <p>On many platforms, the OS supports only status
- codes 0-255. A too large status code is truncated by clearing
- the high bits.</p>
- </note>
<p>For integer <c><anno>Status</anno></c>, the Erlang runtime system
closes all ports and allows async threads to finish their
operations before exiting. To exit without such flushing, use
@@ -2125,6 +2136,15 @@ os_prompt%</pre>
</func>
<func>
+ <name name="iolist_to_iovec" arity="1"/>
+ <fsummary>Converts an iolist to a iovec.</fsummary>
+ <desc>
+ <p>Returns an iovec that is made from the integers and binaries in
+ <c><anno>IoListOrBinary</anno></c>.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="is_alive" arity="0"/>
<fsummary>Check whether the local node is alive.</fsummary>
<desc>
@@ -6483,6 +6503,9 @@ lists:map(
<p>This is the sum of the runtime for all threads
in the Erlang runtime system and can therefore be greater
than the wall clock time.</p>
+ <warning><p>This value might wrap due to limitations in the
+ underlying functionality provided by the operating system
+ that is used.</p></warning>
<p>Example:</p>
<pre>
> <input>statistics(runtime).</input>
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 4d7e578738..f6bc6e984b 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -31,11 +31,140 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
-<section><title>Erts 9.0</title>
+<section><title>Erts 9.0.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A timer internal bit-field used for storing scheduler id
+ was too small. As a result, VM internal timer data
+ structures could become inconsistent when using 1024
+            schedulers on the system. Note that systems with fewer
+            than 1024 schedulers are not affected by this bug.</p>
+ <p>
+ This bug was introduced in ERTS version 7.0 (OTP 18.0).</p>
+ <p>
+ Own Id: OTP-14548 Aux Id: OTP-11997, ERL-468 </p>
+ </item>
+ <item>
+ <p>
+ Automatic cleanup of a BIF timer, when the owner process
+ terminated, could race with the timeout of the timer.
+ This could cause the VM internal data structures to
+            become inconsistent, which very likely caused a VM crash.</p>
+ <p>
+ This bug was introduced in ERTS version 9.0 (OTP 20.0).</p>
+ <p>
+ Own Id: OTP-14554 Aux Id: OTP-14356, ERL-468 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 9.0.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Binary append operations did not check for overflow,
+ resulting in nonsensical results when huge binaries were
+ appended.</p>
+ <p>
+ Own Id: OTP-14524</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 9.0.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
<item>
+ <p>
+ Added missing release notes for OTP-14491 ("performance
+ bug in pre-allocators") which was included in erts-9.0.1
+ (OTP-20.0.1).</p>
+ <p>
+ Own Id: OTP-14494</p>
+ </item>
+ <item>
+ <p>Fixed a bug that prevented TCP sockets from being
+ closed properly on send timeouts.</p>
+ <p>
+ Own Id: OTP-14509</p>
+ </item>
+ <item>
+ <p>
+            Fixed a bug in operator <c>bxor</c> causing erroneous
+ result when one operand is a big <em>negative</em>
+ integer with the lowest <c>N*W</c> bits as zero and the
+ other operand not larger than <c>N*W</c> bits. <c>N</c>
+ is an integer of 1 or larger and <c>W</c> is 32 or 64
+ depending on word size.</p>
+ <p>
+ Own Id: OTP-14514</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 9.0.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixed a bug in gen_tcp:send where it never returned when
+ repeatedly called on a remotely closed TCP socket.</p>
+ <p>
+ Own Id: OTP-13939 Aux Id: ERL-193 </p>
+ </item>
+ <item>
+ <p>
+ Fixed segfault that could happen during cleanup of
+ aborted erlang:port_command/3 calls. A port_command is
+ aborted if the port is closed at the same time as the
+ port_command was issued. This bug was introduced in
+ erts-8.0.</p>
+ <p>
+ Own Id: OTP-14481</p>
+ </item>
+ <item>
+ <p>
+ Fixed implementation of <c>statistics(wall_clock)</c> and
+ <c>statistics(runtime)</c> so that values do not
+ unnecessarily wrap due to the emulator. Note that the
+ values returned by <c>statistics(runtime)</c> may still
+ wrap due to limitations in the underlying functionality
+ provided by the operating system.</p>
+ <p>
+ Own Id: OTP-14484</p>
+ </item>
+ <item>
+ <p>
+            Fixed a performance bug in pre-allocators that could cause
+            them to permanently fall back on normal, more expensive memory
+            allocation. Pre-allocators are used for quick allocation
+            of short-lived metadata used by messages and other
+            scheduled tasks. The bug has existed since OTP R15B02.
+ [this release note was missing in erts-9.0.1]</p>
+ <p>
+ Own Id: OTP-14491</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 9.0</title>
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
<p>Fix various bugs regarding loading, upgrade and purge
of HiPE compiled code:</p> <list> <item>The native code
memory for a purged module was never deallocated.</item>
@@ -631,6 +760,42 @@
</section>
+<section><title>Erts 8.3.5.1</title>
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixed a bug in gen_tcp:send where it never returned when
+ repeatedly called on a remotely closed TCP socket.</p>
+ <p>
+ Own Id: OTP-13939 Aux Id: ERL-193 </p>
+ </item>
+ <item>
+ <p>
+ Fixed segfault that could happen during cleanup of
+ aborted erlang:port_command/3 calls. A port_command is
+ aborted if the port is closed at the same time as the
+ port_command was issued. This bug was introduced in
+ erts-8.0.</p>
+ <p>
+ Own Id: OTP-14481</p>
+ </item>
+ <item>
+ <p>
+ Fixed implementation of <c>statistics(wall_clock)</c> and
+ <c>statistics(runtime)</c> so that values do not
+ unnecessarily wrap due to the emulator. Note that the
+ values returned by <c>statistics(runtime)</c> may still
+ wrap due to limitations in the underlying functionality
+ provided by the operating system.</p>
+ <p>
+ Own Id: OTP-14484</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 8.3.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/erts/doc/src/zlib.xml b/erts/doc/src/zlib.xml
index 1d272c4c18..f5cc1b1e64 100644
--- a/erts/doc/src/zlib.xml
+++ b/erts/doc/src/zlib.xml
@@ -65,13 +65,17 @@ list_to_binary([Compressed|Last])</pre>
<tag><c>badarg</c></tag>
<item>Bad argument.
</item>
+ <tag><c>not_initialized</c></tag>
+      <item>The stream has not been initialized, e.g. if
+ <seealso marker="#inflateInit/1"><c>inflateInit/1</c></seealso> wasn't
+ called prior to a call to
+ <seealso marker="#inflate/2"><c>inflate/2</c></seealso>.
+ </item>
<tag><c>data_error</c></tag>
<item>The data contains errors.
</item>
<tag><c>stream_error</c></tag>
<item>Inconsistent stream state.</item>
- <tag><c>einval</c></tag>
- <item>Bad value or wrong function called.</item>
<tag><c>{need_dictionary,Adler32}</c></tag>
<item>See <seealso marker="#inflate/2"><c>inflate/2</c></seealso>.
</item>
@@ -90,6 +94,9 @@ list_to_binary([Compressed|Last])</pre>
<name name="zlevel"/>
</datatype>
<datatype>
+ <name name="zflush"/>
+ </datatype>
+ <datatype>
<name name="zmemlevel"/>
</datatype>
<datatype>
@@ -112,6 +119,11 @@ list_to_binary([Compressed|Last])</pre>
<fsummary>Calculate the Adler checksum.</fsummary>
<desc>
<p>Calculates the Adler-32 checksum for <c><anno>Data</anno></c>.</p>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="erts:erlang#adler32/1">
+ <c>erlang:adler32/1</c></seealso> instead.</p>
+ </warning>
</desc>
</func>
@@ -127,6 +139,11 @@ list_to_binary([Compressed|Last])</pre>
Crc = lists:foldl(fun(Data,Crc0) ->
zlib:adler32(Z, Crc0, Data),
end, zlib:adler32(Z,&lt;&lt; &gt;&gt;), Datas)</pre>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="erts:erlang#adler32/2">
+ <c>erlang:adler32/2</c></seealso> instead.</p>
+ </warning>
</desc>
</func>
@@ -141,6 +158,11 @@ Crc = lists:foldl(fun(Data,Crc0) ->
<p>This function returns the <c><anno>Adler</anno></c> checksum of
<c>[Data1,Data2]</c>, requiring only <c><anno>Adler1</anno></c>,
<c><anno>Adler2</anno></c>, and <c><anno>Size2</anno></c>.</p>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="erts:erlang#adler32_combine/3">
+ <c>erlang:adler32_combine/3</c></seealso> instead.</p>
+ </warning>
</desc>
</func>
@@ -165,6 +187,12 @@ Crc = lists:foldl(fun(Data,Crc0) ->
<fsummary>Get current CRC.</fsummary>
<desc>
<p>Gets the current calculated CRC checksum.</p>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="erts:erlang#crc32/1">
+ <c>erlang:crc32/1</c></seealso> on the uncompressed data
+ instead.</p>
+ </warning>
</desc>
</func>
@@ -173,6 +201,11 @@ Crc = lists:foldl(fun(Data,Crc0) ->
<fsummary>Calculate CRC.</fsummary>
<desc>
<p>Calculates the CRC checksum for <c><anno>Data</anno></c>.</p>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="erts:erlang#crc32/1">
+ <c>erlang:crc32/1</c></seealso> instead.</p>
+ </warning>
</desc>
</func>
@@ -188,6 +221,11 @@ Crc = lists:foldl(fun(Data,Crc0) ->
Crc = lists:foldl(fun(Data,Crc0) ->
zlib:crc32(Z, Crc0, Data),
end, zlib:crc32(Z,&lt;&lt; &gt;&gt;), Datas)</pre>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="erts:erlang#crc32/2">
+ <c>erlang:crc32/2</c></seealso> instead.</p>
+ </warning>
</desc>
</func>
@@ -202,6 +240,11 @@ Crc = lists:foldl(fun(Data,Crc0) ->
<p>This function returns the <c><anno>CRC</anno></c> checksum of
<c>[Data1,Data2]</c>, requiring only <c><anno>CRC1</anno></c>,
<c><anno>CRC2</anno></c>, and <c><anno>Size2</anno></c>.</p>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="erts:erlang#crc32_combine/3">
+ <c>erlang:crc32_combine/3</c></seealso> instead.</p>
+ </warning>
</desc>
</func>
@@ -407,8 +450,8 @@ list_to_binary([B1,B2])</pre>
<seealso marker="#deflateInit/1"><c>deflateInit/1,2,6</c></seealso> or
<seealso marker="#deflateReset/1"><c>deflateReset/1</c></seealso>,
before any call of
- <seealso marker="#deflate/3"><c>deflate/3</c></seealso>.
- The compressor and decompressor must use the same dictionary (see
+ <seealso marker="#deflate/3"><c>deflate/3</c></seealso>.</p>
+ <p>The compressor and decompressor must use the same dictionary (see
<seealso marker="#inflateSetDictionary/2">
<c>inflateSetDictionary/2</c></seealso>).</p>
<p>The Adler checksum of the dictionary is returned.</p>
@@ -420,6 +463,10 @@ list_to_binary([B1,B2])</pre>
<fsummary>Get buffer size.</fsummary>
<desc>
<p>Gets the size of the intermediate buffer.</p>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release.</p>
+ </warning>
</desc>
</func>
@@ -443,14 +490,31 @@ list_to_binary([B1,B2])</pre>
<name name="inflate" arity="2"/>
<fsummary>Decompress data.</fsummary>
<desc>
- <p>Decompresses as much data as possible.
- It can introduce some output latency (reading
- input without producing any output).</p>
- <p>If a preset dictionary is needed at this point (see
- <seealso marker="#inflateSetDictionary/2">
- <c>inflateSetDictionary/2</c></seealso>), <c>inflate/2</c> throws a
- <c>{need_dictionary,Adler}</c> exception, where <c>Adler</c> is
- the Adler-32 checksum of the dictionary chosen by the compressor.</p>
+ <p>Equivalent to
+ <seealso marker="#inflate/3"><c>inflate(Z, Data, [])</c></seealso>
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="inflate" arity="3"/>
+ <fsummary>Decompress data.</fsummary>
+ <desc>
+ <p>Decompresses as much data as possible. It can introduce some output
+ latency (reading input without producing any output).</p>
+ <p>Currently the only available option is
+ <c>{exception_on_need_dict,boolean()}</c> which controls whether the
+ function should throw an exception when a preset dictionary is
+ required for decompression. When set to false, a
+ <c>need_dictionary</c> tuple will be returned instead. See
+ <seealso marker="#inflateSetDictionary/2">
+ <c>inflateSetDictionary/2</c></seealso> for details.</p>
+ <warning>
+ <p>This option defaults to <c>true</c> for backwards compatibility
+ but we intend to remove the exception behavior in a future
+ release. New code that needs to handle dictionaries manually
+ should always specify <c>{exception_on_need_dict,false}</c>.</p>
+ </warning>
</desc>
</func>
@@ -458,6 +522,11 @@ list_to_binary([B1,B2])</pre>
<name name="inflateChunk" arity="1"/>
<fsummary>Read next uncompressed chunk.</fsummary>
<desc>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="#safeInflate/2"><c>safeInflate/2</c>
+ </seealso> instead.</p>
+ </warning>
<p>Reads the next chunk of uncompressed data, initialized by
<seealso marker="#inflateChunk/2"><c>inflateChunk/2</c></seealso>.</p>
<p>This function is to be repeatedly called, while it returns
@@ -469,23 +538,27 @@ list_to_binary([B1,B2])</pre>
<name name="inflateChunk" arity="2"/>
<fsummary>Decompress data with limited output size.</fsummary>
<desc>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release. Use <seealso marker="#safeInflate/2"><c>safeInflate/2</c>
+ </seealso> instead.</p>
+ </warning>
<p>Like <seealso marker="#inflate/2"><c>inflate/2</c></seealso>,
- but decompresses no more data than will fit in the buffer configured
- through <seealso marker="#setBufSize/2"><c>setBufSize/2</c></seealso>.
- Is is useful when decompressing a stream with a high compression
- ratio, such that a small amount of compressed input can expand up to
- 1000 times.</p>
+ but decompresses no more data than will fit in the buffer configured
+ through <seealso marker="#setBufSize/2"><c>setBufSize/2</c>
+          </seealso>. It is useful when decompressing a stream with a high
+ compression ratio, such that a small amount of compressed input can
+ expand up to 1000 times.</p>
<p>This function returns <c>{more, Decompressed}</c>, when there is
- more output available, and
- <seealso marker="#inflateChunk/1"><c>inflateChunk/1</c></seealso>
- is to be used to read it.</p>
- <p>This function can introduce some output latency (reading
- input without producing any output).</p>
- <p>If a preset dictionary is needed at this point (see
- <seealso marker="#inflateSetDictionary/2">
- <c>inflateSetDictionary/2</c></seealso>), this function throws a
- <c>{need_dictionary,Adler}</c> exception, where <c>Adler</c> is
- the Adler-32 checksum of the dictionary chosen by the compressor.</p>
+ more output available, and
+ <seealso marker="#inflateChunk/1"><c>inflateChunk/1</c></seealso>
+ is to be used to read it.</p>
+ <p>This function can introduce some output latency (reading input
+ without producing any output).</p>
+ <p>An exception will be thrown if a preset dictionary is required for
+ further decompression. See
+ <seealso marker="#inflateSetDictionary/2">
+ <c>inflateSetDictionary/2</c></seealso> for details.</p>
<p>Example:</p>
<pre>
walk(Compressed, Handler) ->
@@ -517,6 +590,18 @@ loop(Z, Handler, Uncompressed) ->
</func>
<func>
+ <name name="inflateGetDictionary" arity="1"/>
+ <fsummary>Return the decompression dictionary.</fsummary>
+ <desc>
+ <p>Returns the decompression dictionary currently in use
+ by the stream. This function must be called between
+ <seealso marker="#inflateInit/1"><c>inflateInit/1,2</c></seealso>
+ and <seealso marker="#inflateEnd/1"><c>inflateEnd</c></seealso>.</p>
+ <p>Only supported if ERTS was compiled with zlib >= 1.2.8.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="inflateInit" arity="1"/>
<fsummary>Initialize a session for decompression.</fsummary>
<desc>
@@ -562,45 +647,83 @@ loop(Z, Handler, Uncompressed) ->
<fsummary>Initialize the decompression dictionary.</fsummary>
<desc>
<p>Initializes the decompression dictionary from the specified
- uncompressed byte sequence. This function must be called
- immediately after a call of
- <seealso marker="#inflate/2"><c>inflate/2</c></seealso>
- if this call threw a <c>{need_dictionary,Adler}</c> exception.
- The dictionary chosen by the compressor can be determined from the
- Adler value thrown by the call to <c>inflate/2</c>.
- The compressor and decompressor must use the same dictionary (see
- <seealso marker="#deflateSetDictionary/2">
- <c>deflateSetDictionary/2</c></seealso>).</p>
+ uncompressed byte sequence. This function must be called as a
+        response to an inflate operation (e.g.
+ <seealso marker="#safeInflate/2"><c>safeInflate/2</c></seealso>)
+ returning <c>{need_dictionary,Adler,Output}</c> or in the case of
+ deprecated functions, throwing an
+ <c>{'EXIT',{{need_dictionary,Adler},_StackTrace}}</c> exception.</p>
+ <p>The dictionary chosen by the compressor can be determined from the
+ Adler value returned or thrown by the call to the inflate function.
+        The compressor and decompressor must use the same dictionary (see
+ <seealso marker="#deflateSetDictionary/2">
+ <c>deflateSetDictionary/2</c></seealso>).</p>
+ <p>After setting the dictionary the inflate operation should be
+ retried without new input.</p>
<p>Example:</p>
<pre>
-unpack(Z, Compressed, Dict) ->
+deprecated_unpack(Z, Compressed, Dict) ->
case catch zlib:inflate(Z, Compressed) of
- {'EXIT',{{need_dictionary,DictID},_}} ->
- zlib:inflateSetDictionary(Z, Dict),
+ {'EXIT',{{need_dictionary,_DictID},_}} ->
+ ok = zlib:inflateSetDictionary(Z, Dict),
Uncompressed = zlib:inflate(Z, []);
Uncompressed ->
Uncompressed
- end.</pre>
+ end.
+
+new_unpack(Z, Compressed, Dict) ->
+ case zlib:inflate(Z, Compressed, [{exception_on_need_dict, false}]) of
+ {need_dictionary, _DictId, Output} ->
+ ok = zlib:inflateSetDictionary(Z, Dict),
+ [Output | zlib:inflate(Z, [])];
+ Uncompressed ->
+ Uncompressed
+ end.</pre>
</desc>
</func>
<func>
- <name name="inflateGetDictionary" arity="1"/>
- <fsummary>Return the decompression dictionary.</fsummary>
+ <name name="open" arity="0"/>
+ <fsummary>Open a stream and return a stream reference.</fsummary>
<desc>
- <p>Returns the decompression dictionary currently in use
- by the stream. This function must be called between
- <seealso marker="#inflateInit/1"><c>inflateInit/1,2</c></seealso>
- and <seealso marker="#inflateEnd/1"><c>inflateEnd</c></seealso>.</p>
- <p>Only supported if ERTS was compiled with zlib >= 1.2.8.</p>
+ <p>Opens a zlib stream.</p>
</desc>
</func>
<func>
- <name name="open" arity="0"/>
- <fsummary>Open a stream and return a stream reference.</fsummary>
+ <name name="safeInflate" arity="2"/>
+ <fsummary>Decompress data with limited output size.</fsummary>
<desc>
- <p>Opens a zlib stream.</p>
+ <p>Like <seealso marker="#inflate/2"><c>inflate/2</c></seealso>,
+ but returns once it has expanded beyond a small
+ implementation-defined threshold. It's useful when decompressing
+ untrusted input which could have been maliciously crafted to expand
+ until the system runs out of memory.</p>
+ <p>This function returns <c>{continue | finished, Output}</c>, where
+ <anno>Output</anno> is the data that was decompressed in this call.
+ New input can be queued up on each call if desired, and the function
+ will return <c>{finished, Output}</c> once all queued data has been
+ decompressed.</p>
+ <p>This function can introduce some output latency (reading
+ input without producing any output).</p>
+ <p>If a preset dictionary is required for further decompression, this
+ function returns a <c>need_dictionary</c> tuple. See
+ <seealso marker="#inflateSetDictionary/2">
+        <c>inflateSetDictionary/2</c></seealso> for details.</p>
+ <p>Example:</p>
+ <pre>
+walk(Compressed, Handler) ->
+ Z = zlib:open(),
+ zlib:inflateInit(Z),
+ loop(Z, Handler, zlib:safeInflate(Z, Compressed)),
+ zlib:inflateEnd(Z),
+ zlib:close(Z).
+
+loop(Z, Handler, {continue, Output}) ->
+ Handler(Output),
+ loop(Z, Handler, zlib:safeInflate(Z, []));
+loop(Z, Handler, {finished, Output}) ->
+ Handler(Output).</pre>
</desc>
</func>
@@ -609,6 +732,10 @@ unpack(Z, Compressed, Dict) ->
<fsummary>Set buffer size.</fsummary>
<desc>
<p>Sets the intermediate buffer size.</p>
+ <warning>
+ <p>This function is deprecated and will be removed in a future
+ release.</p>
+ </warning>
</desc>
</func>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 61c1e14741..1916b97a89 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -116,6 +116,7 @@ ifeq ($(TYPE),lcnt)
PURIFY =
TYPEMARKER = .lcnt
TYPE_FLAGS = @CFLAGS@ -DERTS_ENABLE_LOCK_COUNT
+ENABLE_ALLOC_TYPE_VARS += lcnt
else
ifeq ($(TYPE),frmptr)
@@ -814,17 +815,19 @@ RUN_OBJS = \
$(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o \
$(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o \
$(OBJDIR)/erl_ptab.o $(OBJDIR)/erl_map.o \
- $(OBJDIR)/erl_msacc.o
+ $(OBJDIR)/erl_msacc.o $(OBJDIR)/erl_lock_flags.o \
+ $(OBJDIR)/erl_io_queue.o
LTTNG_OBJS = $(OBJDIR)/erlang_lttng.o
-NIF_OBJS = $(OBJDIR)/erl_tracer_nif.o
+NIF_OBJS = \
+ $(OBJDIR)/erl_tracer_nif.o \
+ $(OBJDIR)/zlib_nif.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
$(OBJDIR)/registry_drv.o \
$(OBJDIR)/efile_drv.o \
$(OBJDIR)/inet_drv.o \
- $(OBJDIR)/zlib_drv.o \
$(OBJDIR)/ram_file_drv.o \
$(OBJDIR)/ttsl_drv.o
OS_OBJS = \
@@ -854,7 +857,6 @@ OS_OBJS = \
DRV_OBJS = \
$(OBJDIR)/efile_drv.o \
$(OBJDIR)/inet_drv.o \
- $(OBJDIR)/zlib_drv.o \
$(OBJDIR)/ram_file_drv.o \
$(OBJDIR)/ttsl_drv.o
endif
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 2055c29190..38e02c386f 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -138,7 +138,7 @@ atom_hash(Atom* obj)
while(len--) {
v = *p++;
/* latin1 clutch for r16 */
- if ((v & 0xFE) == 0xC2 && (*p & 0xC0) == 0x80) {
+ if (len && (v & 0xFE) == 0xC2 && (*p & 0xC0) == 0x80) {
v = (v << 6) | (*p & 0x3F);
p++; len--;
}
@@ -442,7 +442,8 @@ init_atom_table(void)
erts_smp_atomic_init_nob(&atom_put_ops, 0);
#endif
- erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 007bf99b6e..14ddb74324 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -97,7 +97,8 @@ init_purge_state(void)
{
purge_state.module = THE_NON_VALUE;
- erts_smp_mtx_init(&purge_state.mtx, "purge_state");
+ erts_smp_mtx_init(&purge_state.mtx, "purge_state", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
purge_state.pending_purge_lambda =
erts_export_put(am_erts_code_purger, am_pending_purge_lambda, 3);
@@ -118,7 +119,9 @@ init_purge_state(void)
void
erts_beam_bif_load_init(void)
{
- erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas");
+ erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
release_literal_areas.first = NULL;
release_literal_areas.last = NULL;
erts_smp_atomic_init_nob(&erts_copy_literal_area__,
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index b9453c1d9a..950639f7ae 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -165,7 +165,8 @@ erts_bp_init(void) {
erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index");
+ erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#endif
}
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 79d751d13e..bc83699951 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -6829,7 +6829,7 @@ apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
}
if (is_not_nil(tmp)) { /* Must be well-formed list */
- p->freason = EXC_UNDEF;
+ p->freason = EXC_BADARG;
return NULL;
}
reg[arity] = fun;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 40dd4129d2..b6595d2a5d 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -62,9 +62,6 @@ static erts_smp_atomic32_t msacc;
static Export *await_sched_wall_time_mod_trap;
static erts_smp_atomic32_t sched_wall_time;
-static erts_smp_mtx_t ports_snapshot_mtx;
-erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
-
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/*
@@ -3021,17 +3018,17 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
{
Eterm res;
byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
- Sint i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
-
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- i = erts_list_length(BIF_ARG_1);
- if (i > MAX_ATOM_CHARACTERS) {
+ if (i == -2) {
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
BIF_ERROR(BIF_P, BADARG);
}
- res = erts_atom_put(buf, i, ERTS_ATOM_ENC_UTF8, 1);
+ res = erts_atom_put(buf, written, ERTS_ATOM_ENC_UTF8, 1);
ASSERT(is_atom(res));
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(res);
@@ -3042,8 +3039,9 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
{
byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
- Sint i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
-
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
if (i < 0) {
error:
erts_free(ERTS_ALC_T_TMP, (void *) buf);
@@ -3051,7 +3049,7 @@ BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
} else {
Eterm a;
- if (erts_atom_get((char *) buf, i, &a, ERTS_ATOM_ENC_UTF8)) {
+ if (erts_atom_get((char *) buf, written, &a, ERTS_ATOM_ENC_UTF8)) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(a);
} else {
@@ -3946,15 +3944,18 @@ BIF_RETTYPE display_string_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm string = BIF_ARG_1;
- Sint len = is_string(string);
- char *str;
+ Sint len = erts_unicode_list_to_buf_len(string);
+ Sint written;
+ byte *str;
+ int res;
- if (len <= 0) {
+ if (len < 0) {
BIF_ERROR(p, BADARG);
}
- str = (char *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
- if (intlist_to_buf(string, str, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ str = (byte *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
+ res = erts_unicode_list_to_buf(string, str, len, &written);
+ if (res != 0 || written != len)
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error (%d)\n", __FILE__, __LINE__, res);
str[len] = '\0';
erts_fprintf(stderr, "%s", str);
erts_free(ERTS_ALC_T_TMP, (void *) str);
@@ -3970,9 +3971,6 @@ BIF_RETTYPE display_nl_0(BIF_ALIST_0)
/**********************************************************************/
-#define HALT_MSG_SIZE 200
-static char halt_msg[HALT_MSG_SIZE+1];
-
/* stop the system with exit code and flags */
BIF_RETTYPE halt_2(BIF_ALIST_2)
{
@@ -4022,16 +4020,17 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_exit(ERTS_ABORT_EXIT, "");
}
- else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- Sint i;
+ else if (is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) {
+# define HALT_MSG_SIZE 200
+ static byte halt_msg[4*HALT_MSG_SIZE+1];
+ Sint written;
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE)) == -1) {
+ if (erts_unicode_list_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE,
+ &written) == -1 ) {
goto error;
}
- if (i == -2) /* truncated string */
- i = HALT_MSG_SIZE;
- ASSERT(i >= 0 && i <= HALT_MSG_SIZE);
- halt_msg[i] = '\0';
+ ASSERT(written >= 0 && written < sizeof(halt_msg));
+ halt_msg[written] = '\0';
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -5138,9 +5137,6 @@ void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
void erts_init_bif(void)
{
- erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL);
-
/*
* bif_return_trap/2 is a hidden BIF that bifs that need to
* yield the calling process traps to.
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index a8bbf5f8c1..10ca0b5066 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -437,7 +437,10 @@ bif erts_debug:dump_links/1
#
# Lock counter bif's
#
-bif erts_debug:lock_counters/1
+bif erts_debug:lcnt_control/2
+bif erts_debug:lcnt_control/1
+bif erts_debug:lcnt_collect/0
+bif erts_debug:lcnt_clear/0
#
# New Bifs in R8.
@@ -676,3 +679,9 @@ bif math:ceil/1
bif math:fmod/2
bif os:set_signal/2
bif erts_internal:maps_to_list/2
+
+#
+# New in 20.1
+#
+
+bif erlang:iolist_to_iovec/1
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 7128b8ed23..5eaf262cd8 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1293,8 +1293,11 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
*r++ = ~c ^ *y++;
x++;
}
- while(xl--)
- *r++ = ~*x++;
+ while(xl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c;
+ x++;
+ }
}
else {
ErtsDigit b1, b2;
@@ -1312,7 +1315,9 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
x++; y++;
}
while(xl--) {
- *r++ = *x++;
+ DSUBb(*x,0,b1,c1);
+ *r++ = c1;
+ x++;
}
}
}
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index ec6267711b..8a3d1b20b4 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -57,7 +57,8 @@ void erts_code_ix_init(void)
*/
erts_smp_atomic32_init_nob(&the_active_code_index, 0);
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
- erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
+ erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_tsd_key_create(&has_code_write_permission,
"erts_has_code_write_permission");
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 982f1066df..09fdb897f5 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -3223,7 +3223,8 @@ static ErtsNodesMonitor *nodes_monitors_end;
static void
init_nodes_monitors(void)
{
- erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors");
+ erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
nodes_monitors = NULL;
nodes_monitors_end = NULL;
}
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 169e1e423d..c7ab444c96 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -3819,7 +3819,8 @@ hdbg_init(void)
hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
free_hdbg_mblks = &hdbg_mblks[0];
used_hdbg_mblks = NULL;
- erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
}
static void *check_memory_fence(void *ptr,
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 7b5cbe2178..97a1cf1308 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -344,7 +344,8 @@ ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
static erts_smp_spinlock_t NAME##_lck; \
ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_smp_spin_lock(&NAME##_lck), \
erts_smp_spin_unlock(&NAME##_lck))
@@ -358,7 +359,8 @@ ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
static erts_mtx_t NAME##_lck; \
ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_mtx_init(NAME##_lck, #NAME "_alloc_lock"), \
+ erts_mtx_init(NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_mtx_lock(&NAME##_lck), \
erts_mtx_unlock(&NAME##_lck))
@@ -371,7 +373,8 @@ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
#define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \
static erts_spinlock_t NAME##_lck; \
ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \
- erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_spin_lock(&NAME##_lck), \
erts_spin_unlock(&NAME##_lck))
@@ -448,7 +451,7 @@ NAME##_free(TYPE *p) \
}
#ifdef DEBUG
-#define ERTS_PRE_ALLOC_SIZE(SZ) 2
+#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? (SZ)/10 + 10 : 100)
#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T))
#else
#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) > 1 ? (SZ) : 1)
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 8a23a1526e..50a1d97dd5 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -368,6 +368,13 @@ type SSB SHORT_LIVED PROCESSES ssb
+endif
++if lcnt
+
+type LCNT_CARRIER STANDARD SYSTEM lcnt_lock_info_carrier
+type LCNT_VECTOR SHORT_LIVED SYSTEM lcnt_sample_vector
+
++endif
+
type DEBUG SHORT_LIVED SYSTEM debugging
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 6fddba4b34..af86ad0548 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -6135,16 +6135,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (init->ts) {
allctr->thread_safe = 1;
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x_opt(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC);
-#else
- erts_mtx_init_x(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no));
-#endif /*ERTS_ENABLE_LOCK_COUNT*/
+ erts_mtx_init(&allctr->mutex, "alcu_allocator", make_small(allctr->alloc_no),
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
#ifdef DEBUG
allctr->debug.saved_tid = 0;
@@ -6324,7 +6316,8 @@ erts_alcu_init(AlcUInit_t *init)
carrier_alignment = sizeof(Unit_t);
#endif
- erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms");
+ erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
atoms_initialized = 0;
initialized = 1;
@@ -6592,3 +6585,45 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void lcnt_enable_allocator_lock_count(Allctr_t *allocator, int enable) {
+ if(!allocator->thread_safe) {
+ return;
+ }
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&allocator->mutex.lcnt,
+ "alcu_allocator", make_small(allocator->alloc_no),
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ } else {
+ erts_lcnt_uninstall(&allocator->mutex.lcnt);
+ }
+}
+
+static void lcnt_update_thread_spec_locks(ErtsAllocatorThrSpec_t *tspec, int enable) {
+ if(tspec->enabled) {
+ int i;
+
+ for(i = 0; i < tspec->size; i++) {
+ lcnt_enable_allocator_lock_count(tspec->allctr[i], enable);
+ }
+ }
+}
+
+void erts_lcnt_update_allocator_locks(int enable) {
+ int i;
+
+ for(i = ERTS_ALC_A_MIN; i < ERTS_ALC_A_MAX; i++) {
+ ErtsAllocatorInfo_t *ai = &erts_allctrs_info[i];
+
+ if(ai->enabled && ai->alloc_util) {
+ if(ai->thr_spec) {
+ lcnt_update_thread_spec_locks((ErtsAllocatorThrSpec_t*)ai->extra, enable);
+ } else {
+ lcnt_enable_allocator_lock_count((Allctr_t*)ai->extra, enable);
+ }
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index e889980fa4..73c467aa0a 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -220,6 +220,10 @@ void* erts_alcu_literal_32_sys_realloc(Allctr_t*, void *ptr, Uint *size_p, Uint
void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int superalign);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_allocator_locks(int enable);
+#endif
+
#endif /* !ERL_ALLOC_UTIL__ */
#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
@@ -673,7 +677,6 @@ void erts_alcu_assert_failed(char* expr, char* file, int line, char *func);
int is_sbc_blk(Block_t*);
#endif
-
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
&& !defined(ERL_ALLOC_UTIL_IMPL__) */
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 84254af0c2..9a93034fcb 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -194,7 +194,8 @@ erts_init_async(void)
ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
async->init.data.no_initialized = 0;
- erts_mtx_init(&async->init.data.mtx, "async_init_mtx");
+ erts_mtx_init(&async->init.data.mtx, "async_init_mtx", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_cnd_init(&async->init.data.cnd);
erts_atomic_init_nob(&async->init.data.id, 0);
@@ -213,7 +214,8 @@ erts_init_async(void)
for (i = 1; i <= erts_no_schedulers; i++) {
ErtsAsyncReadyQ *arq = async_ready_q(i);
#if ERTS_USE_ASYNC_READY_ENQ_MTX
- erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx");
+ erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
#endif
erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
qinit.arg = (void *) (SWord) i;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index e2773475b0..0547b4d75c 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -48,6 +48,7 @@
#include "erl_map.h"
#define ERTS_PTAB_WANT_DEBUG_FUNCS__
#include "erl_ptab.h"
+#include "erl_time.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -3544,24 +3545,32 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_runtime) {
- UWord u1, u2, dummy;
+ ErtsMonotonicTime u1, u2;
Eterm b1, b2;
- elapsed_time_both(&u1,&dummy,&u2,&dummy);
- b1 = erts_make_integer(u1,BIF_P);
- b2 = erts_make_integer(u2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, u1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, u2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, u1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, u2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_run_queue) {
res = erts_run_queues_len(NULL, 1, 0, 0);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
- UWord w1, w2;
+ ErtsMonotonicTime w1, w2;
Eterm b1, b2;
- wall_clock_elapsed_time_both(&w1, &w2);
- b1 = erts_make_integer((Uint) w1,BIF_P);
- b2 = erts_make_integer((Uint) w2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_wall_clock_elapsed_both(&w1, &w2);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, w1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, w2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, w1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, w2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_io) {
@@ -4444,48 +4453,120 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
#ifdef ERTS_ENABLE_LOCK_COUNT
+
+typedef struct {
+ /* info->location_count may increase between size calculation and term
+ * building, so we cap it at the value sampled in lcnt_build_result_vector.
+ *
+ * Shrinking is safe though. */
+ int max_location_count;
+ erts_lcnt_lock_info_t *info;
+} lcnt_sample_t;
+
+typedef struct lcnt_sample_vector_ {
+ lcnt_sample_t *elements;
+ size_t size;
+} lcnt_sample_vector_t;
+
+static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
+ erts_lcnt_lock_info_t *iterator;
+ lcnt_sample_vector_t result;
+ size_t allocated_entries;
+
+ allocated_entries = 64;
+ result.size = 0;
+
+ result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
+ allocated_entries * sizeof(lcnt_sample_t));
+
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(list, &iterator)) {
+ erts_lcnt_retain_lock_info(iterator);
+
+ result.elements[result.size].max_location_count = iterator->location_count;
+ result.elements[result.size].info = iterator;
+
+ result.size++;
+
+ if(result.size >= allocated_entries) {
+ allocated_entries *= 2;
+
+ result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
+ allocated_entries * sizeof(lcnt_sample_t));
+ }
+ }
+
+ return result;
+}
+
+static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
+ size_t i;
+
+ for(i = 0; i < vector->size; i++) {
+ erts_lcnt_release_lock_info(vector->elements[i].info);
+ }
+
+ erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
+}
+
+/* The size of an integer is not guaranteed to be constant since we're walking
+ * over live data, and may cross over into bignum territory between size calc
+ * and the actual build. This takes care of that through always assuming the
+ * worst, but needs to be fixed up with HRelease once the final term has been
+ * built. */
+static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
+ Eterm res = THE_NON_VALUE;
+
+ if(szp) {
+ *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
+ }
+
+ if(hpp) {
+ if (IS_USMALL(0, ui)) {
+ res = make_small(ui);
+ } else {
+ res = erts_uint64_to_big(ui, hpp);
+ }
+ }
+
+ return res;
+}
+
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- Uint tries = 0, colls = 0;
- unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
- unsigned int line = 0;
unsigned int i;
-
+ const char *file;
+
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
-
+
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
- * { .. histogram .. }]
- */
+ * [{{file, line},
+ {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }] */
- tries = (Uint) ethr_atomic_read(&stats->tries);
- colls = (Uint) ethr_atomic_read(&stats->colls);
-
- line = stats->line;
- timer_s = stats->timer.s;
- timer_ns = stats->timer.ns;
- timer_n = stats->timer_n;
-
- af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
- uil = erts_bld_uint( hpp, szp, line);
+ file = stats->file ? stats->file : "undefined";
+
+ af = erts_atom_put((byte *)file, strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
+ uil = erts_bld_uint( hpp, szp, stats->line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
-
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
- uits = erts_bld_uint( hpp, szp, timer_s);
- uitns = erts_bld_uint( hpp, szp, timer_ns);
- uitn = erts_bld_uint( hpp, szp, timer_n);
+ uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
+ uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));
+
+ uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
+ uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
+ uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
- vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
}
+
thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
@@ -4494,185 +4575,266 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
return res;
}
-static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
+static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
+ Eterm id = info->id;
+
+ if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_TYPE_PROCLOCK) {
+ /* Use registered names as id's for process locks if available. Thread
+ * progress is delayed since we may be running on a dirty scheduler. */
+ ErtsThrPrgrDelayHandle delay_handle;
+ Process *process;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+
+ process = erts_proc_lookup(info->id);
+ if (process && process->common.u.alive.reg) {
+ id = process->common.u.alive.reg->name;
+ }
+
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
+ const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
+ id = erts_atom_put((byte*)name, strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ }
+
+ return id;
+}
+
+static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
+ erts_lcnt_lock_info_t *info = sample->info;
+
Eterm name, type, id, stats = NIL, t;
- Process *proc = NULL;
- char *ltype;
+ const char *lock_desc;
int i;
+
+ /* term: [{name, id, type, stats()}] */
+
+ ASSERT(info->name);
- /* term:
- * [{name, id, type, stats()}]
- */
-
- ASSERT(lock->name);
-
- ltype = erts_lcnt_lock_type(lock->flag);
-
- ASSERT(ltype);
-
- type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
-
- if (lock->flag & ERTS_LCNT_LT_ALLOC) {
- /* use allocator types names as id's for allocator locks */
- ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
- /* use registered names as id's for process locks if available */
- proc = erts_proc_lookup(lock->id);
- if (proc && proc->common.u.alive.reg) {
- id = proc->common.u.alive.reg->name;
- } else {
- /* otherwise use process id */
- id = lock->id;
- }
+ lock_desc = erts_lock_flags_get_type_name(info->flags);
+
+ type = erts_atom_put((byte*)lock_desc, strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte*)info->name, strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);
+
+ /* Only attempt to resolve ids when actually emitting the term. This ought
+ * to be safe since all immediates are the same size. */
+ if(hpp != NULL) {
+ id = lcnt_pretty_print_lock_id(info);
} else {
- id = lock->id;
+ id = NIL;
}
- for (i = 0; i < lock->n_stats; i++) {
- stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
+ for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
+ stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
}
t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
- res = erts_bld_cons( hpp, szp, t, res);
+ res = erts_bld_cons(hpp, szp, t, res);
return res;
}
-static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
+static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
+ lcnt_sample_vector_t *current_locks,
+ lcnt_sample_vector_t *deleted_locks, Eterm res) {
+ const char *str_duration = "duration";
+ const char *str_locks = "locks";
+
Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
- erts_lcnt_lock_t *lock = NULL;
- char *str_duration = "duration";
- char *str_locks = "locks";
-
- /* term:
- * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
- */
-
+ size_t i;
+
+ /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */
+
/* duration tuple */
- dts = erts_bld_uint( hpp, szp, data->duration.s);
- dtns = erts_bld_uint( hpp, szp, data->duration.ns);
+ dts = bld_unstable_uint64(hpp, szp, duration->s);
+ dtns = bld_unstable_uint64(hpp, szp, duration->ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
-
+
adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
-
aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
-
- for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < current_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
}
-
- for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < deleted_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
}
-
+
tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
-
- res = erts_bld_cons( hpp, szp, tloc, res);
- res = erts_bld_cons( hpp, szp, tdur, res);
+
+ res = erts_bld_cons(hpp, szp, tloc, res);
+ res = erts_bld_cons(hpp, szp, tdur, res);
return res;
-}
+}
+
+static struct {
+ const char *name;
+ erts_lock_flags_t flag;
+} lcnt_category_map[] = {
+ {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
+ {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
+ {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
+ {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
+ {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
+ {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
+ {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
+ {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
+ {NULL, 0}
+ };
+
+static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
+ int i = 0;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
+ return lcnt_category_map[i].flag;
+ }
+ }
+
+ return 0;
+}
+
+static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
+ Eterm res;
+ int i;
+
+ res = NIL;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(mask & lcnt_category_map[i].flag) {
+ Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name,
+ strlen(lcnt_category_map[i].name),
+ ERTS_ATOM_ENC_UTF8, 0);
+
+ res = erts_bld_cons(hpp, szp, category, res);
+ }
+ }
+
+ return res;
+}
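
The category table above is used in both directions: lcnt_atom_to_lock_category() turns an atom into a flag when the caller sets the mask, and lcnt_build_category_list() turns a mask back into a list of names when the caller reads it. A standalone sketch of the same name/flag round-trip (plain C, invented category names, not the ERTS flag values):

#include <stdio.h>
#include <string.h>

typedef unsigned lock_flags_t;

static const struct { const char *name; lock_flags_t flag; } category_map[] = {
    {"allocator", 1u << 0},
    {"db",        1u << 1},
    {"io",        1u << 2},
    {NULL, 0}
};

/* Name -> flag; returns 0 for unknown names, like lcnt_atom_to_lock_category(). */
static lock_flags_t category_from_name(const char *name) {
    int i;
    for (i = 0; category_map[i].name != NULL; i++)
        if (strcmp(category_map[i].name, name) == 0)
            return category_map[i].flag;
    return 0;
}

/* Mask -> names; prints every category whose bit is set in the mask. */
static void print_categories(lock_flags_t mask) {
    int i;
    for (i = 0; category_map[i].name != NULL; i++)
        if (mask & category_map[i].flag)
            printf("%s ", category_map[i].name);
    putchar('\n');
}

int main(void) {
    lock_flags_t mask = category_from_name("db") | category_from_name("io");
    print_categories(mask);    /* prints: db io */
    return 0;
}
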
+
#endif
-BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
+BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
{
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Eterm res = NIL;
-#endif
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
+#else
+ erts_lcnt_clear_counters();
+ BIF_RET(am_ok);
+#endif
+}
- if (BIF_ARG_1 == am_enabled) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- BIF_RET(am_true);
+BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
+{
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
#else
- BIF_RET(am_false);
-#endif
- }
-#ifdef ERTS_ENABLE_LOCK_COUNT
+ lcnt_sample_vector_t current_locks, deleted_locks;
+ erts_lcnt_data_t data;
- else if (BIF_ARG_1 == am_info) {
- erts_lcnt_data_t *data;
- Uint hsize = 0;
- Uint *szp;
- Eterm* hp;
+ Eterm *term_heap_start, *term_heap_end;
+ Uint term_heap_size = 0;
+ Eterm result;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ data = erts_lcnt_get_data();
- erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- data = erts_lcnt_get_data();
+ current_locks = lcnt_build_sample_vector(data.current_locks);
+ deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
- /* calculate size */
+ lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
+ &current_locks, &deleted_locks, NIL);
- szp = &hsize;
- lcnt_build_result_term(NULL, szp, data, NIL);
+ term_heap_start = HAlloc(BIF_P, term_heap_size);
+ term_heap_end = term_heap_start;
- /* alloc and build */
+ result = lcnt_build_result_term(&term_heap_end, NULL,
+ &data.duration, &current_locks, &deleted_locks, NIL);
- hp = HAlloc(BIF_P, hsize);
+ HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
- res = lcnt_build_result_term(&hp, NULL, data, res);
-
- erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+ lcnt_destroy_sample_vector(&current_locks);
+ lcnt_destroy_sample_vector(&deleted_locks);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(res);
- } else if (BIF_ARG_1 == am_clear) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ BIF_RET(result);
+#endif
+}
- erts_lcnt_clear_counters();
+BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t mask;
+ Eterm *term_heap_block;
+ Uint term_heap_size;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ mask = erts_lcnt_get_category_mask();
+ term_heap_size = 0;
- BIF_RET(am_ok);
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* ptr = tuple_val(BIF_ARG_1);
-
- if ((arityval(ptr[0]) == 2) && (ptr[2] == am_false || ptr[2] == am_true)) {
- int lock_opt = 0, enable = (ptr[2] == am_true) ? 1 : 0;
- if (ERTS_IS_ATOM_STR("copy_save", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_COPYSAVE;
- } else if (ERTS_IS_ATOM_STR("process_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PROCLOCK;
- } else if (ERTS_IS_ATOM_STR("port_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PORTLOCK;
- } else if (ERTS_IS_ATOM_STR("suspend", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_SUSPEND;
- } else if (ERTS_IS_ATOM_STR("location", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_LOCATION;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
+ lcnt_build_category_list(NULL, &term_heap_size, mask);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ term_heap_block = HAlloc(BIF_P, term_heap_size);
- if (enable) res = erts_lcnt_set_rt_opt(lock_opt) ? am_true : am_false;
- else res = erts_lcnt_clear_rt_opt(lock_opt) ? am_true : am_false;
-
-#ifdef ERTS_SMP
- if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PORTLOCK) {
- erts_lcnt_enable_io_lock_count(enable);
- } else if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PROCLOCK) {
- erts_lcnt_enable_proc_lock_count(enable);
- }
+ BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
+ } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ if(erts_lcnt_get_preserve_info()) {
+ BIF_RET(am_true);
+ }
+
+ BIF_RET(am_false);
+ }
#endif
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t category_mask = 0;
+ Eterm categories = BIF_ARG_2;
+
+ if(!(is_list(categories) || is_nil(categories))) {
+ BIF_ERROR(BIF_P, BADARG);
}
- }
-#endif
+ while(is_list(categories)) {
+ Eterm *cell = list_val(categories);
+ erts_lock_flags_t category;
+
+ category = lcnt_atom_to_lock_category(CAR(cell));
+
+ if(!category) {
+ Eterm *hp = HAlloc(BIF_P, 4);
+
+ BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
+ }
+
+ category_mask |= category;
+ categories = CDR(cell);
+ }
+
+ erts_lcnt_set_category_mask(category_mask);
+
+ BIF_RET(am_ok);
+ } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ int enabled = (BIF_ARG_2 == am_true);
+
+ if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ erts_lcnt_set_preserve_info(enabled);
+
+ BIF_RET(am_ok);
+ }
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
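
The rewritten erts_debug:lcnt_collect/0 implementation above builds its result term in two passes: a sizing pass with a NULL heap pointer, then a build pass into a block of exactly that size. Because bld_unstable_uint64() always reserves worst-case bignum space for counters that may still be ticking, the build pass can come in under the estimate, and the surplus heap is handed back with HRelease. A standalone sketch of the same size-then-build-then-shrink idiom in plain C (illustrative only, not ERTS term building):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pass 1 (buf == NULL): accumulate a worst-case size estimate.
 * Pass 2 (buf != NULL): emit for real and advance *pos by the exact size. */
static void emit_u64(char *buf, size_t *pos, unsigned long long v) {
    char tmp[32];
    int n = snprintf(tmp, sizeof tmp, "%llu,", v);
    if (buf)
        memcpy(buf + *pos, tmp, (size_t)n);
    *pos += buf ? (size_t)n : sizeof tmp;   /* assume the worst when sizing */
}

int main(void) {
    unsigned long long samples[] = {3, 123456789012345ULL, 42};
    size_t reserved = 0, used = 0, i;
    char *heap;

    for (i = 0; i < 3; i++) emit_u64(NULL, &reserved, samples[i]); /* size pass  */
    heap = malloc(reserved);
    if (!heap)
        return 1;
    for (i = 0; i < 3; i++) emit_u64(heap, &used, samples[i]);     /* build pass */
    heap = realloc(heap, used + 1);      /* "HRelease": return the surplus */
    heap[used] = '\0';
    printf("reserved %zu, used %zu: %s\n", reserved, used, heap);
    free(heap);
    return 0;
}
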
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index fc6fb5f868..2f8adc87d5 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -392,7 +392,8 @@ init_magic_ref_tables(void)
erts_snprintf(&tblp->name[0], sizeof(tblp->name),
"magic_ref_table_0");
hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
- erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table");
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
hash_funcs.hash = nsched_mreft_hash;
hash_funcs.cmp = nsched_mreft_cmp;
@@ -402,7 +403,8 @@ init_magic_ref_tables(void)
erts_snprintf(&tblp->name[0], sizeof(tblp->name),
"magic_ref_table_%d", i);
hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
- erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table");
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
}
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 71c64997c1..b4e611f01b 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1321,7 +1321,14 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
used_size_in_bits = erts_bin_offset + build_size_in_bits;
+
sb->is_writable = 0; /* Make sure that no one else can write. */
pb->size = NBYTES(used_size_in_bits);
pb->flags |= PB_ACTIVE_WRITER;
@@ -1395,9 +1402,21 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
- used_size_in_bits = erts_bin_offset + build_size_in_bits;
- used_size_in_bytes = NBYTES(used_size_in_bits);
- bin_size = 2*used_size_in_bytes;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
+ used_size_in_bits = erts_bin_offset + build_size_in_bits;
+ used_size_in_bytes = NBYTES(used_size_in_bits);
+
+ if(used_size_in_bits < (ERTS_UINT_MAX / 2)) {
+ bin_size = 2 * used_size_in_bytes;
+ } else {
+ bin_size = NBYTES(ERTS_UINT_MAX);
+ }
+
bin_size = (bin_size < 256) ? 256 : bin_size;
/*
@@ -1487,6 +1506,12 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* Calculate new size in bytes.
*/
erts_bin_offset = 8*sb->size + sb->bitsize;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
pos_in_bits_after_build = erts_bin_offset + build_size_in_bits;
pb->size = (pos_in_bits_after_build+7) >> 3;
pb->flags |= PB_ACTIVE_WRITER;
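
Each of the erl_bits.c hunks above guards the addition erts_bin_offset + build_size_in_bits with the test (ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset, which detects unsigned wrap-around before it can happen and raises SYSTEM_LIMIT instead of silently building an undersized binary. The same pre-addition overflow check in a tiny standalone program (plain C, hypothetical names):

#include <limits.h>
#include <stdio.h>

/* Returns 0 and stores a + b in *out, or -1 if the sum would wrap. */
static int add_bits_checked(unsigned a, unsigned b, unsigned *out) {
    if (UINT_MAX - b < a)
        return -1;            /* a + b > UINT_MAX: caller reports SYSTEM_LIMIT */
    *out = a + b;
    return 0;
}

int main(void) {
    unsigned sum;
    printf("%d\n", add_bits_checked(10u, 20u, &sum));           /* 0, sum == 30 */
    printf("%d\n", add_bits_checked(UINT_MAX - 1u, 2u, &sum));  /* -1, would wrap */
    return 0;
}
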
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 50f33b2014..f8b2fa744f 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1706,7 +1706,8 @@ erts_init_cpu_topology(void)
{
int ix;
- erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info");
+ erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA,
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 17e0f2aeec..b83134c79c 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -575,9 +575,7 @@ delete_owned_table(Process *p, DbTable *tb)
table_dec_refc(tb, 1);
}
-
-static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
- char *rwname, char* fixname)
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
{
#ifdef ERTS_SMP
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
@@ -587,9 +585,10 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
#endif
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
- rwname, tb->common.the_name);
- erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
}
@@ -1753,8 +1752,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
/* Note, 'type' is *read only* from now on... */
#endif
erts_smp_refc_init(&tb->common.fix_count, 0);
- db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
- "db_tab", "db_tab_fix");
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
tb->common.keypos = keypos;
tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
@@ -3391,8 +3389,9 @@ void init_db(ErtsDbSpinCount db_spin_count)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
- "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB);
}
#endif
@@ -4334,3 +4333,47 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
return list;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&tb->common.rwlock.lcnt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_lcnt_install_new_lock_info(&tb->common.fixlock.lcnt, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(&tb->common.rwlock.lcnt);
+ erts_lcnt_uninstall(&tb->common.fixlock.lcnt);
+ }
+
+ if(IS_HASH_TABLE(tb->common.status)) {
+ erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ }
+}
+
+static void lcnt_update_db_locks_per_sched(void *enable) {
+ ErtsSchedulerData *esdp;
+ DbTable *head;
+
+ esdp = erts_get_scheduler_data();
+ head = esdp->ets_tables.clist;
+
+ if(head) {
+ DbTable *iterator = head;
+
+ do {
+ if(is_table_alive(iterator)) {
+ erts_lcnt_enable_db_lock_count(iterator, !!enable);
+ }
+
+ iterator = iterator->common.all.next;
+ } while (iterator != head);
+ }
+}
+
+void erts_lcnt_update_db_locks(int enable) {
+ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers,
+ &lcnt_update_db_locks_per_sched, (void*)(UWord)enable);
+}
+
+#endif /* ERTS_ENABLE_LOCK_COUNT */
\ No newline at end of file
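
erts_lcnt_update_db_locks() above fans the work out with erts_schedule_multi_misc_aux_work(), and each scheduler then walks its own circular ets_tables.clist with a do/while loop that stops once it is back at the head. A standalone sketch of that circular-list traversal (hypothetical types, not the ERTS scheduler data):

#include <stdio.h>

typedef struct table { int id; struct table *next; } table_t;

/* Visit every element of a circular, singly linked list exactly once,
 * starting and ending at the head, as lcnt_update_db_locks_per_sched() does. */
static void walk_tables(table_t *head) {
    if (head) {
        table_t *iterator = head;
        do {
            printf("toggling locks on table %d\n", iterator->id);
            iterator = iterator->next;
        } while (iterator != head);
    }
}

int main(void) {
    table_t a = {1, NULL}, b = {2, NULL}, c = {3, NULL};
    a.next = &b; b.next = &c; c.next = &a;   /* close the ring */
    walk_tables(&a);
    return 0;
}
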
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 4ff9f224e8..d83126b3a2 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -129,6 +129,11 @@ extern erts_smp_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
Uint erts_db_get_max_tabs(void);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable);
+void erts_lcnt_update_db_locks(int enable);
+#endif
+
#endif /* ERL_DB_H__ */
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 0addfaa3c7..ae9322dfd3 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -675,8 +675,8 @@ int db_create_hash(Process *p, DbTable *tbl)
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
- "db_hash_slot", tb->common.the_name);
+ erts_smp_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
}
/* This important property is needed to guarantee the two buckets
* involved in a grow/shrink operation it protected by the same lock:
@@ -3206,3 +3206,23 @@ Eterm erts_ets_hash_sizeof_ext_segtab(void)
return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
+ int i;
+
+ if(tb->locks == NULL) {
+ return;
+ }
+
+ for(i = 0; i < DB_HASH_LOCK_CNT; i++) {
+ erts_lcnt_ref_t *ref = &tb->locks->lck_vec[i].lck.lcnt;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(ref, "db_hash_slot", tb->common.the_name,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(ref);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index f491c85d95..523ed7860e 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -103,4 +103,8 @@ typedef struct {
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
Eterm erts_ets_hash_sizeof_ext_segtab(void);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable);
+#endif
+
#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
index 69421dcfcc..10c76d2579 100644
--- a/erts/emulator/beam/erl_dirty_bif.tab
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -46,6 +46,11 @@
dirty-cpu erts_debug:dirty_cpu/2
dirty-io erts_debug:dirty_io/2
+# lcnt_control/1 doesn't need to be dirty.
+dirty-cpu erts_debug:lcnt_control/2
+dirty-cpu erts_debug:lcnt_collect/0
+dirty-cpu erts_debug:lcnt_clear/0
+
# --- TEST of Dirty BIF functionality ---
# Functions below will execute on dirty schedulers when emulator has
# been configured for testing dirty schedulers. This is used for test
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 0e8ebf0c98..5ad616fec3 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -40,7 +40,6 @@
#include "erl_drv_nif.h"
#include <stdlib.h>
-#include <sys/types.h> /* ssize_t */
#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
#ifndef STATIC_ERLANG_DRIVER
@@ -48,24 +47,6 @@
#define ERL_DRIVER_TYPES_ONLY
#define WIN32_DYNAMIC_ERL_DRIVER
#endif
-/*
- * This structure can be cast to a WSABUF structure.
- */
-typedef struct _SysIOVec {
- unsigned long iov_len;
- char* iov_base;
-} SysIOVec;
-#else /* Unix */
-# ifdef HAVE_SYS_UIO_H
-# include <sys/types.h>
-# include <sys/uio.h>
-typedef struct iovec SysIOVec;
-# else
-typedef struct {
- char* iov_base;
- size_t iov_len;
-} SysIOVec;
-# endif
#endif
#ifndef EXTERN
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index f88138063e..31b4817fb1 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -144,8 +144,25 @@ typedef signed int ErlNapiSInt;
#define ERTS_NAPI_USEC__ 2
#define ERTS_NAPI_NSEC__ 3
-#endif /* __ERL_DRV_NIF_H__ */
-
-
-
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+/*
+ * This structure can be cast to a WSABUF structure.
+ */
+typedef struct _SysIOVec {
+ unsigned long iov_len;
+ char* iov_base;
+} SysIOVec;
+#else /* Unix */
+# include <sys/types.h>
+# ifdef HAVE_SYS_UIO_H
+# include <sys/uio.h>
+typedef struct iovec SysIOVec;
+# else
+typedef struct {
+ char* iov_base;
+ size_t iov_len;
+} SysIOVec;
+# endif
+#endif
+#endif /* __ERL_DRV_NIF_H__ */
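
Moving SysIOVec into erl_drv_nif.h lets both drivers and NIFs share the definition; on Unix it is plain struct iovec, the layout consumed by vectored I/O calls such as writev(2), and on Windows it can be cast to WSABUF. A minimal POSIX example of gathering two buffers in a single call (assumes a Unix-like system; not ERTS code):

#include <sys/uio.h>
#include <unistd.h>

int main(void) {
    char part1[] = "hello, ";
    char part2[] = "world\n";
    struct iovec iov[2];

    iov[0].iov_base = part1;
    iov[0].iov_len  = sizeof part1 - 1;
    iov[1].iov_base = part2;
    iov[1].iov_len  = sizeof part2 - 1;

    /* One system call writes both buffers in order, with no copying
     * into a single contiguous block first. */
    return writev(STDOUT_FILENO, iov, 2) < 0;
}
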
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 0e6aadf568..742c428f2a 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -55,7 +55,7 @@ fatal_error(int err, char *func)
struct ErlDrvMutex_ {
ethr_mutex mtx;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
#endif
char *name;
};
@@ -68,7 +68,7 @@ struct ErlDrvCond_ {
struct ErlDrvRWLock_ {
ethr_rwmutex rwmtx;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
#endif
char *name;
};
@@ -146,7 +146,8 @@ void erl_drv_thr_init(void)
sizeof(char *)*ERL_DRV_TSD_KEYS_INC);
for (i = 0; i < ERL_DRV_TSD_KEYS_INC; i++)
used_tsd_keys[i] = NULL;
- erts_mtx_init(&tsd_mtx, "drv_tsd");
+ erts_mtx_init(&tsd_mtx, "drv_tsd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
}
/*
@@ -176,7 +177,8 @@ erl_drv_mutex_create(char *name)
dmtx->name = no_name;
}
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&dmtx->lcnt, dmtx->name, ERTS_LCNT_LT_MUTEX);
+ erts_lcnt_init_ref_x(&dmtx->lcnt, dmtx->name, NIL,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
}
return dmtx;
@@ -191,7 +193,7 @@ erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&dmtx->lcnt);
+ erts_lcnt_uninstall(&dmtx->lcnt);
#endif
res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
if (res != 0)
@@ -368,7 +370,8 @@ erl_drv_rwlock_create(char *name)
drwlck->name = no_name;
}
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&drwlck->lcnt, drwlck->name, ERTS_LCNT_LT_RWMUTEX);
+ erts_lcnt_init_ref_x(&drwlck->lcnt, drwlck->name, NIL,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
}
return drwlck;
@@ -383,7 +386,7 @@ erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&drwlck->lcnt);
+ erts_lcnt_uninstall(&drwlck->lcnt);
#endif
res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
if (res != 0)
@@ -411,7 +414,7 @@ erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
res = ethr_rwmutex_tryrlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LCNT_LO_READ);
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
return res;
#else
@@ -426,7 +429,7 @@ erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_rlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -442,7 +445,7 @@ erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
@@ -457,7 +460,7 @@ erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
res = ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
return res;
#else
@@ -472,7 +475,7 @@ erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -488,7 +491,7 @@ erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index d18016c42e..535f677bb3 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -63,7 +63,8 @@ erts_init_fun_table(void)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 3c8bdaa62e..8cb977a7f3 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -274,7 +274,8 @@ erts_init_gc(void)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
init_gc_info(&dirty_gc.info);
#endif
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 99995be464..6e5cc7b801 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -103,14 +103,14 @@ typedef enum {
# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore
#endif
-/* Bit 0 to 9 contains scheduler id (see mask below) */
-#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 10)
-#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 11)
-#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 12)
-#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 13)
-#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 14)
-#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 15)
-#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 16)
+/* Bit 0 to 10 contains scheduler id (see mask below) */
+#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 11)
+#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 12)
+#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13)
+#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14)
+#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15)
+#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16)
+#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 17)
#define ERTS_TMR_ROFLG_SID_MASK \
(ERTS_TMR_ROFLG_HLT - (Uint32) 1)
@@ -1273,14 +1273,15 @@ bif_timer_timeout(ErtsHLTimerService *srv,
ERTS_HLT_ASSERT(proc);
}
if (proc) {
+ int dec_refc = 0;
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = tmr->btm.bp;
+ tmr->btm.bp = NULL;
+ erts_queue_message(proc, 0, mp, tmr->btm.message,
+ am_clock_service);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ /* If the process is exiting do not disturb the cleanup... */
if (!ERTS_PROC_IS_EXITING(proc)) {
- int dec_refc = 0;
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = tmr->btm.bp;
- tmr->btm.bp = NULL;
- erts_queue_message(proc, 0, mp, tmr->btm.message,
- am_clock_service);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
#ifdef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->btm.proc_list.next) {
proc_btm_list_delete(&proc->bif_timers, tmr);
@@ -1293,10 +1294,10 @@ bif_timer_timeout(ErtsHLTimerService *srv,
dec_refc = 1;
}
#endif
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
- if (dec_refc)
- timer_pre_dec_refc((ErtsTimer *) tmr);
}
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ if (dec_refc)
+ timer_pre_dec_refc((ErtsTimer *) tmr);
}
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
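
The flag change above widens the scheduler-id field from bits 0-9 to bits 0-10, so every ERTS_TMR_ROFLG_* flag moves up one bit; ERTS_TMR_ROFLG_SID_MASK is still derived as ERTS_TMR_ROFLG_HLT - 1, which keeps the mask in step with the first flag bit. A small standalone illustration of packing an id into the low bits with flags above it (illustrative constants only):

#include <stdint.h>
#include <stdio.h>

#define FLG_HLT      (((uint32_t) 1) << 11)   /* first flag above the id field */
#define FLG_CALLBACK (((uint32_t) 1) << 17)
#define SID_MASK     (FLG_HLT - (uint32_t) 1) /* bits 0..10 hold the id */

int main(void) {
    uint32_t word = 1234u | FLG_HLT | FLG_CALLBACK;   /* pack id 1234 plus flags */
    printf("sid=%u hlt=%d cb=%d\n",
           (unsigned) (word & SID_MASK),
           !!(word & FLG_HLT),
           !!(word & FLG_CALLBACK));
    return 0;
}
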
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 6172595552..5206d7564f 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2357,8 +2357,12 @@ erl_start(int argc, char **argv)
#ifdef ERTS_SMP
erts_start_schedulers();
- /* Let system specific code decide what to do with the main thread... */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
+#endif
+
+ /* Let system specific code decide what to do with the main thread... */
erts_sys_main_thread(); /* May or may not return! */
#else
{
@@ -2373,6 +2377,11 @@ erl_start(int argc, char **argv)
erts_sched_init_time_sup(esdp);
erts_ets_sched_spec_data_init(esdp);
erts_aux_work_timeout_late_init(esdp);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
+#endif
+
process_main(esdp->x_reg_array, esdp->f_reg_array);
}
#endif
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index 4d4defd8b5..634509f880 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1200,7 +1200,8 @@ erts_instr_init(int stat, int map_stat)
stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
- erts_mtx_init(&instr_mutex, "instr");
+ erts_mtx_init(&instr_mutex, "instr", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
mem_anchor = NULL;
@@ -1223,7 +1224,8 @@ erts_instr_init(int stat, int map_stat)
if (map_stat) {
- erts_mtx_init(&instr_x_mutex, "instr_x");
+ erts_mtx_init(&instr_x_mutex, "instr_x", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_instr_memory_map = 1;
erts_instr_stat = 1;
diff --git a/erts/emulator/beam/erl_io_queue.c b/erts/emulator/beam/erl_io_queue.c
new file mode 100644
index 0000000000..301b55b315
--- /dev/null
+++ b/erts/emulator/beam/erl_io_queue.c
@@ -0,0 +1,1238 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+
+#define ERL_WANT_HIPE_BIF_WRAPPER__
+#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
+
+#include "erl_bits.h"
+#include "erl_io_queue.h"
+
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+#define IOL2V_SMALL_BIN_LIMIT (ERL_ONHEAP_BIN_LIMIT * 4)
+
+static void free_binary(ErtsIOQBinary *b, int driver);
+static ErtsIOQBinary *alloc_binary(Uint size, char *source, void **iov_base, int driver);
+
+void erts_ioq_init(ErtsIOQueue *q, ErtsAlcType_t alct, int driver)
+{
+
+ ERTS_CT_ASSERT(offsetof(ErlNifIOVec,flags) == sizeof(ErtsIOVecCommon));
+ ERTS_CT_ASSERT(sizeof(ErlIOVec) == sizeof(ErtsIOVecCommon));
+ ERTS_CT_ASSERT(sizeof(size_t) == sizeof(ErlDrvSizeT));
+ ERTS_CT_ASSERT(sizeof(size_t) == sizeof(Uint));
+
+ q->alct = alct;
+ q->driver = driver;
+ q->size = 0;
+ q->v_head = q->v_tail = q->v_start = q->v_small;
+ q->v_end = q->v_small + ERTS_SMALL_IO_QUEUE;
+ q->b_head = q->b_tail = q->b_start = q->b_small;
+ q->b_end = q->b_small + ERTS_SMALL_IO_QUEUE;
+}
+
+void erts_ioq_clear(ErtsIOQueue *q)
+{
+ ErtsIOQBinary** binp = q->b_head;
+ int driver = q->driver;
+
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+
+ while(binp < q->b_tail) {
+ if (*binp != NULL)
+ free_binary(*binp, driver);
+ binp++;
+ }
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->v_start = q->v_end = q->v_head = q->v_tail = NULL;
+ q->b_start = q->b_end = q->b_head = q->b_tail = NULL;
+ q->size = 0;
+}
+
+static void free_binary(ErtsIOQBinary *b, int driver)
+{
+ if (driver)
+ driver_free_binary(&b->driver);
+ else if (erts_refc_dectest(&b->nif.intern.refc, 0) == 0)
+ erts_bin_free(&b->nif);
+}
+
+static ErtsIOQBinary *alloc_binary(Uint size, char *source, void **iov_base, int driver)
+{
+ if (driver) {
+ ErlDrvBinary *bin = driver_alloc_binary(size);
+ if (!bin) return NULL;
+ sys_memcpy(bin->orig_bytes, source, size);
+ *iov_base = bin->orig_bytes;
+ return (ErtsIOQBinary *)bin;
+ } else {
+        /* This clause can be triggered if enif_ioq_enq_binary is used */
+ Binary *bin = erts_bin_nrml_alloc(size);
+ if (!bin) return NULL;
+ erts_refc_init(&bin->intern.refc, 1);
+ sys_memcpy(bin->orig_bytes, source, size);
+ *iov_base = bin->orig_bytes;
+ return (ErtsIOQBinary *)bin;
+ }
+}
+
+Uint erts_ioq_size(ErtsIOQueue *q)
+{
+ return q->size;
+}
+
+/* expand queue to hold n elements in tail or head */
+static int expandq(ErtsIOQueue* q, int n, int tail)
+/* tail: 0 to make room at the head, otherwise make room at the tail */
+{
+ int h_sz; /* room before header */
+ int t_sz; /* room after tail */
+ int q_sz; /* occupied */
+ int nvsz;
+ SysIOVec* niov;
+ ErtsIOQBinary** nbinv;
+
+ h_sz = q->v_head - q->v_start;
+ t_sz = q->v_end - q->v_tail;
+ q_sz = q->v_tail - q->v_head;
+
+ if (tail && (n <= t_sz)) /* do we need to expand tail? */
+ return 0;
+ else if (!tail && (n <= h_sz)) /* do we need to expand head? */
+ return 0;
+ else if (n > (h_sz + t_sz)) { /* need to allocate */
+        /* we may get a little extra but that is ok */
+ nvsz = (q->v_end - q->v_start) + n;
+
+ niov = erts_alloc_fnf(q->alct, nvsz * sizeof(SysIOVec));
+ if (!niov)
+ return -1;
+ nbinv = erts_alloc_fnf(q->alct, nvsz * sizeof(ErtsIOQBinary**));
+ if (!nbinv) {
+ erts_free(q->alct, (void *) niov);
+ return -1;
+ }
+ if (tail) {
+ sys_memcpy(niov, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+
+ sys_memcpy(nbinv, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else {
+ sys_memcpy(niov+nvsz-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail - q_sz;
+
+ sys_memcpy(nbinv+nvsz-q_sz, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail - q_sz;
+ }
+ }
+ else if (tail) { /* move to beginning to make room in tail */
+ sys_memmove(q->v_start, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+ sys_memmove(q->b_start, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else { /* move to end to make room */
+ sys_memmove(q->v_end-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail-q_sz;
+ sys_memmove(q->b_end-q_sz, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail-q_sz;
+ }
+
+ return 0;
+}
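
expandq() keeps the io vector and binary vector as contiguous arrays with spare room at both ends: if the requested side is full but the other side has slack, the live entries are slid over; otherwise a larger array of "old span + n" entries is allocated and the live entries are copied to the requested end. A compact standalone model of the tail case (plain C, made-up names, without the small static array optimization):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    int *start, *end;    /* allocated block [start, end)  */
    int *head, *tail;    /* live elements   [head, tail)  */
} deq_t;

/* Make room for n more elements at the tail, mirroring expandq(q, n, 1). */
static int make_tail_room(deq_t *q, int n) {
    long used = q->tail - q->head;
    if (q->end - q->tail >= n)
        return 0;                                    /* already enough room */
    if (n > (q->head - q->start) + (q->end - q->tail)) {
        long new_cap = (q->end - q->start) + n;      /* grow: old span + n */
        int *block = malloc(new_cap * sizeof *block);
        if (!block)
            return -1;
        memcpy(block, q->head, used * sizeof *block);
        free(q->start);
        q->start = block;   q->end  = block + new_cap;
        q->head  = block;   q->tail = block + used;
    } else {                                         /* slide toward the head */
        memmove(q->start, q->head, used * sizeof *q->start);
        q->head = q->start;
        q->tail = q->head + used;
    }
    return 0;
}

int main(void) {
    deq_t q;
    q.start = malloc(4 * sizeof *q.start);
    if (!q.start)
        return 1;
    q.end = q.start + 4;
    q.head = q.tail = q.start;
    *q.tail++ = 1; *q.tail++ = 2; *q.tail++ = 3; *q.tail++ = 4;
    if (make_tail_room(&q, 8) == 0)
        printf("capacity is now %ld entries\n", (long) (q.end - q.start));
    free(q.start);
    return 0;
}
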
+
+static
+int skip(ErtsIOVec* vec, Uint skipbytes,
+ SysIOVec **iovp, ErtsIOQBinary ***binvp,
+ Uint *lenp)
+{
+ int n;
+ Uint len;
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+
+ if (vec->common.size <= skipbytes)
+ return -1;
+
+ iov = vec->common.iov;
+ binv = vec->common.binv;
+ n = vec->common.vsize;
+    /* we use a do-while here to strip iov_len=0 entries from the beginning */
+ do {
+ len = iov->iov_len;
+ if (len <= skipbytes) {
+ skipbytes -= len;
+ iov++;
+ binv++;
+ n--;
+ }
+ else {
+ iov->iov_base = ((char *)(iov->iov_base)) + skipbytes;
+ iov->iov_len -= skipbytes;
+ skipbytes = 0;
+ }
+ } while(skipbytes > 0);
+
+ *binvp = binv;
+ *iovp = iov;
+ *lenp = len;
+
+ return n;
+}
+
+/* Put elements from vec at q tail */
+int erts_ioq_enqv(ErtsIOQueue *q, ErtsIOVec *eiov, Uint skipbytes)
+{
+ int n;
+ Uint len;
+ Uint size = eiov->common.size - skipbytes;
+ SysIOVec *iov;
+ ErtsIOQBinary** binv;
+ ErtsIOQBinary* b;
+
+ if (q == NULL)
+ return -1;
+
+ ASSERT(eiov->common.size >= skipbytes);
+ if (eiov->common.size <= skipbytes)
+ return 0;
+
+ n = skip(eiov, skipbytes, &iov, &binv, &len);
+
+ if (n < 0)
+ return n;
+
+ if (q->v_tail + n >= q->v_end)
+ if (expandq(q, n, 1))
+ return -1;
+
+ /* Queue and reference all binaries (remove zero length items) */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+ if ((b = *binv) == NULL) { /* special case create binary ! */
+ b = alloc_binary(len, iov->iov_base, (void**)&q->v_tail->iov_base,
+ q->driver);
+ if (!b) return -1;
+ *q->b_tail++ = b;
+ q->v_tail->iov_len = len;
+ q->v_tail++;
+ }
+ else {
+ if (q->driver)
+ driver_binary_inc_refc(&b->driver);
+ else
+ erts_refc_inc(&b->nif.intern.refc, 1);
+ *q->b_tail++ = b;
+ *q->v_tail++ = *iov;
+ }
+ }
+ iov++;
+ binv++;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
+
+/* Put elements from vec at q head */
+int erts_ioq_pushqv(ErtsIOQueue *q, ErtsIOVec* vec, Uint skipbytes)
+{
+ int n;
+ Uint len;
+ Uint size = vec->common.size - skipbytes;
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+ ErtsIOQBinary* b;
+
+ if (q == NULL)
+ return -1;
+
+ ASSERT(vec->common.size >= skipbytes);
+ if (vec->common.size <= skipbytes)
+ return 0;
+
+ n = skip(vec, skipbytes, &iov, &binv, &len);
+
+ if (n < 0)
+ return n;
+
+ if (q->v_head - n < q->v_start)
+ if (expandq(q, n, 0))
+ return -1;
+
+ /* Queue and reference all binaries (remove zero length items) */
+ iov += (n-1); /* move to end */
+ binv += (n-1); /* move to end */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+ if ((b = *binv) == NULL) { /* special case create binary ! */
+ if (q->driver) {
+ ErlDrvBinary *bin = driver_alloc_binary(len);
+ if (!bin) return -1;
+ sys_memcpy(bin->orig_bytes, iov->iov_base, len);
+ b = (ErtsIOQBinary *)bin;
+ q->v_head->iov_base = bin->orig_bytes;
+ }
+ *--q->b_head = b;
+ q->v_head--;
+ q->v_head->iov_len = len;
+ }
+ else {
+ if (q->driver)
+ driver_binary_inc_refc(&b->driver);
+ else
+ erts_refc_inc(&b->nif.intern.refc, 1);
+ *--q->b_head = b;
+ *--q->v_head = *iov;
+ }
+ }
+ iov--;
+ binv--;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
+
+
+/*
+** Remove size bytes from queue head
+** Return number of bytes that remain in queue
+*/
+int erts_ioq_deq(ErtsIOQueue *q, Uint size)
+{
+ Uint len;
+
+ if ((q == NULL) || (q->size < size))
+ return -1;
+ q->size -= size;
+ while (size > 0) {
+ ASSERT(q->v_head != q->v_tail);
+
+ len = q->v_head->iov_len;
+ if (len <= size) {
+ size -= len;
+ free_binary(*q->b_head, q->driver);
+ *q->b_head++ = NULL;
+ q->v_head++;
+ }
+ else {
+ q->v_head->iov_base = ((char *)(q->v_head->iov_base)) + size;
+ q->v_head->iov_len -= size;
+ size = 0;
+ }
+ }
+
+ /* restart pointers (optimised for enq) */
+ if (q->v_head == q->v_tail) {
+ q->v_head = q->v_tail = q->v_start;
+ q->b_head = q->b_tail = q->b_start;
+ }
+ return 0;
+}
+
+
+Uint erts_ioq_peekqv(ErtsIOQueue *q, ErtsIOVec *ev) {
+ ASSERT(ev);
+
+ if (! q) {
+ return (Uint) -1;
+ } else {
+ if ((ev->common.vsize = q->v_tail - q->v_head) == 0) {
+ ev->common.size = 0;
+ ev->common.iov = NULL;
+ ev->common.binv = NULL;
+ } else {
+ ev->common.size = q->size;
+ ev->common.iov = q->v_head;
+ ev->common.binv = q->b_head;
+ }
+ return q->size;
+ }
+}
+
+SysIOVec* erts_ioq_peekq(ErtsIOQueue *q, int* vlenp) /* length of io-vector */
+{
+
+ if (q == NULL) {
+ *vlenp = -1;
+ return NULL;
+ }
+ if ((*vlenp = (q->v_tail - q->v_head)) == 0)
+ return NULL;
+ return q->v_head;
+}
+
+/* Fills a possibly deep list of chars and binaries into vec
+** Small characters are first stored in the buffer buf of length ln
+** binaries found are copied and linked into msoh
+** Return vector length on success,
+** -1 on overflow
+** -2 on type error
+*/
+
+static ERTS_INLINE void
+io_list_to_vec_set_vec(SysIOVec **iov, ErtsIOQBinary ***binv,
+ ErtsIOQBinary *bin, byte *ptr, Uint len,
+ int *vlen)
+{
+ while (len > MAX_SYSIOVEC_IOVLEN) {
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = MAX_SYSIOVEC_IOVLEN;
+ ptr += MAX_SYSIOVEC_IOVLEN;
+ len -= MAX_SYSIOVEC_IOVLEN;
+ (*iov)++;
+ (*vlen)++;
+ *(*binv)++ = bin;
+ }
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = len;
+ *(*binv)++ = bin;
+ (*iov)++;
+ (*vlen)++;
+}
+
+int
+erts_ioq_iolist_to_vec(Eterm obj, /* io-list */
+ SysIOVec* iov, /* io vector */
+ ErtsIOQBinary** binv, /* binary reference vector */
+ ErtsIOQBinary* cbin, /* binary to store characters */
+ Uint bin_limit, /* small binaries limit */
+ int driver)
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ byte *buf = NULL;
+ Uint len = 0;
+ Uint csize = 0;
+ int vlen = 0;
+ byte* cptr;
+
+ if (cbin) {
+ if (driver) {
+ buf = (byte*)cbin->driver.orig_bytes;
+ len = cbin->driver.orig_size;
+ } else {
+ buf = (byte*)cbin->nif.orig_bytes;
+ len = cbin->nif.orig_size;
+ }
+ }
+ cptr = buf;
+
+ goto L_jump_start; /* avoid push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0)
+ goto L_overflow;
+ *buf++ = unsigned_val(obj);
+ csize++;
+ len--;
+ } else if (is_binary(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto handle_binary;
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) {
+ goto handle_binary;
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ } else if (is_binary(obj)) {
+ Eterm real_bin;
+ Uint offset;
+ Eterm* bptr;
+ Uint size;
+ int bitoffs;
+ int bitsize;
+
+ handle_binary:
+ size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ ASSERT(bitsize == 0);
+ bptr = binary_val(real_bin);
+ if (*bptr == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin *) bptr;
+ if (bitoffs != 0) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ erts_copy_bits(pb->bytes+offset, bitoffs, 1,
+ (byte *) buf, 0, 1, size*8);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else if (bin_limit && size < bin_limit) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ sys_memcpy(buf, pb->bytes+offset, size);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else {
+ ErtsIOQBinary *qbin;
+ if (csize != 0) {
+ io_list_to_vec_set_vec(&iov, &binv, cbin,
+ cptr, csize, &vlen);
+ cptr = buf;
+ csize = 0;
+ }
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ if (driver)
+ qbin = (ErtsIOQBinary*)Binary2ErlDrvBinary(pb->val);
+ else
+ qbin = (ErtsIOQBinary*)pb->val;
+
+ io_list_to_vec_set_vec(
+ &iov, &binv, qbin,
+ pb->bytes+offset, size, &vlen);
+ }
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin *) bptr;
+ if (len < size) {
+ goto L_overflow;
+ }
+ copy_binary_to_buffer(buf, 0,
+ ((byte *) hb->data)+offset, bitoffs,
+ 8*size);
+ csize += size;
+ buf += size;
+ len -= size;
+ }
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ if (csize != 0) {
+ io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
+ }
+
+ DESTROY_ESTACK(s);
+ return vlen;
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return -2;
+
+ L_overflow:
+ DESTROY_ESTACK(s);
+ return -1;
+}
+
+static ERTS_INLINE int
+io_list_vec_count(Eterm obj, Uint *v_size,
+ Uint *c_size, Uint *b_size, Uint *in_clist,
+ Uint *p_v_size, Uint *p_c_size, Uint *p_in_clist,
+ Uint blimit)
+{
+ Uint size = binary_size(obj);
+ Eterm real;
+ ERTS_DECLARE_DUMMY(Uint offset);
+ int bitoffs;
+ int bitsize;
+ ERTS_GET_REAL_BIN(obj, real, offset, bitoffs, bitsize);
+ if (bitsize != 0) return 1;
+ if (thing_subtag(*binary_val(real)) == REFC_BINARY_SUBTAG &&
+ bitoffs == 0) {
+ *b_size += size;
+ if (*b_size < size) return 2;
+ *in_clist = 0;
+ ++*v_size;
+        /* If iov_len is smaller than Uint we split the binary into */
+        /* multiple smaller (2GB) elements in the iolist. */
+ *v_size += size / MAX_SYSIOVEC_IOVLEN;
+ if (size >= blimit) {
+ *p_in_clist = 0;
+ ++*p_v_size;
+ } else {
+ *p_c_size += size;
+ if (!*p_in_clist) {
+ *p_in_clist = 1;
+ ++*p_v_size;
+ }
+ }
+ } else {
+ *c_size += size;
+ if (*c_size < size) return 2;
+ if (!*in_clist) {
+ *in_clist = 1;
+ ++*v_size;
+ }
+ *p_c_size += size;
+ if (!*p_in_clist) {
+ *p_in_clist = 1;
+ ++*p_v_size;
+ }
+ }
+ return 0;
+}
+
+#define IO_LIST_VEC_COUNT(obj) \
+ do { \
+ switch (io_list_vec_count(obj, &v_size, &c_size, \
+ &b_size, &in_clist, \
+ &p_v_size, &p_c_size, &p_in_clist, \
+ blimit)) { \
+ case 1: goto L_type_error; \
+ case 2: goto L_overflow_error; \
+ default: break; \
+ } \
+ } while(0)
+
+/*
+ * Returns 0 if successful and a non-zero value otherwise.
+ *
+ * Return values through pointers:
+ * *vsize - SysIOVec size needed for a writev
+ * *csize - Number of bytes not in binary (in the common binary)
+ * *pvsize - SysIOVec size needed if packing small binaries
+ * *pcsize - Number of bytes in the common binary if packing
+ * *total_size - Total size of iolist in bytes
+ */
+int
+erts_ioq_iolist_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ Uint* total_size, Uint blimit)
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ Uint v_size = 0;
+ Uint c_size = 0;
+ Uint b_size = 0;
+ Uint in_clist = 0;
+ Uint p_v_size = 0;
+ Uint p_c_size = 0;
+ Uint p_in_clist = 0;
+ Uint total;
+
+ goto L_jump_start; /* avoid a push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+
+ if (is_byte(obj)) {
+ c_size++;
+ if (c_size == 0) {
+ goto L_overflow_error;
+ }
+ if (!in_clist) {
+ in_clist = 1;
+ v_size++;
+ }
+ p_c_size++;
+ if (!p_in_clist) {
+ p_in_clist = 1;
+ p_v_size++;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) { /* binary tail is OK */
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ total = c_size + b_size;
+ if (total < c_size) {
+ goto L_overflow_error;
+ }
+ *total_size = total;
+
+ DESTROY_ESTACK(s);
+ *vsize = v_size;
+ *csize = c_size;
+ *pvsize = p_v_size;
+ *pcsize = p_c_size;
+ return 0;
+
+ L_type_error:
+ L_overflow_error:
+ DESTROY_ESTACK(s);
+ return 1;
+}
+
+typedef struct {
+ Eterm result_head;
+ Eterm result_tail;
+ Eterm input_list;
+
+ UWord acc_size;
+ Binary *acc;
+
+ /* We yield after copying this many bytes into the accumulator (minus
+ * a few eaten by consing etc). Large binaries only count toward this
+ * to the extent that splitting them (if needed) resulted in a copy op. */
+ UWord bytereds_available;
+ UWord bytereds_spent;
+
+ Process *process;
+ ErtsEStack estack;
+
+ Eterm magic_reference;
+} iol2v_state_t;
+
+static int iol2v_state_destructor(Binary *data) {
+ iol2v_state_t *state = ERTS_MAGIC_BIN_UNALIGNED_DATA(data);
+
+ DESTROY_SAVED_ESTACK(&state->estack);
+
+ if (state->acc != NULL) {
+ erts_bin_free(state->acc);
+ }
+
+ return 1;
+}
+
+static void iol2v_init(iol2v_state_t *state, Process *process, Eterm input) {
+ state->process = process;
+
+ state->result_head = NIL;
+ state->result_tail = NIL;
+ state->input_list = input;
+
+ state->magic_reference = NIL;
+ state->acc_size = 0;
+ state->acc = NULL;
+
+ CLEAR_SAVED_ESTACK(&state->estack);
+}
+
+static Eterm iol2v_make_sub_bin(iol2v_state_t *state, Eterm bin_term,
+ UWord offset, UWord size) {
+ Uint byte_offset, bit_offset, bit_size;
+ ErlSubBin *sb;
+ Eterm orig_pb_term;
+
+ sb = (ErlSubBin*)HAlloc(state->process, ERL_SUB_BIN_SIZE);
+
+ ERTS_GET_REAL_BIN(bin_term, orig_pb_term,
+ byte_offset, bit_offset, bit_size);
+
+ (void)bit_offset;
+ (void)bit_size;
+
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->bitsize = 0;
+ sb->bitoffs = 0;
+ sb->orig = orig_pb_term;
+ sb->is_writable = 0;
+
+ sb->offs = byte_offset + offset;
+ sb->size = size;
+
+ return make_binary(sb);
+}
+
+static Eterm iol2v_promote_acc(iol2v_state_t *state) {
+ ProcBin *pb;
+
+ state->acc = erts_bin_realloc(state->acc, state->acc_size);
+
+ pb = (ProcBin*)HAlloc(state->process, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = state->acc_size;
+ pb->val = state->acc;
+ pb->bytes = (byte*)(state->acc)->orig_bytes;
+ pb->flags = 0;
+ pb->next = MSO(state->process).first;
+ OH_OVERHEAD(&(MSO(state->process)), pb->size / sizeof(Eterm));
+ MSO(state->process).first = (struct erl_off_heap_header*)pb;
+
+ state->acc_size = 0;
+ state->acc = NULL;
+
+ return make_binary(pb);
+}
+
+/* Destructively enqueues a term to the result list, saving us the hassle of
+ * having to reverse it later. This is safe since GC is disabled and we never
+ * leak the unfinished term to the outside. */
+static void iol2v_enqueue_result(iol2v_state_t *state, Eterm term) {
+ Eterm prev_tail;
+ Eterm *hp;
+
+ prev_tail = state->result_tail;
+
+ hp = HAlloc(state->process, 2);
+ state->result_tail = CONS(hp, term, NIL);
+
+ if(prev_tail != NIL) {
+ Eterm *prev_cell = list_val(prev_tail);
+ CDR(prev_cell) = state->result_tail;
+ } else {
+ state->result_head = state->result_tail;
+ }
+
+ state->bytereds_spent += 1;
+}
+
+#ifndef DEBUG
+ #define ACC_REALLOCATION_LIMIT (IOL2V_SMALL_BIN_LIMIT * 32)
+#else
+ #define ACC_REALLOCATION_LIMIT (IOL2V_SMALL_BIN_LIMIT * 4)
+#endif
+
+static void iol2v_expand_acc(iol2v_state_t *state, UWord extra) {
+ UWord required_bytes, acc_alloc_size;
+
+ ERTS_CT_ASSERT(ERTS_UWORD_MAX > ACC_REALLOCATION_LIMIT / 2);
+ ASSERT(extra >= 1);
+
+ acc_alloc_size = state->acc != NULL ? (state->acc)->orig_size : 0;
+ required_bytes = state->acc_size + extra;
+
+ if (state->acc == NULL) {
+ UWord new_size = MAX(required_bytes, IOL2V_SMALL_BIN_LIMIT);
+
+ state->acc = erts_bin_nrml_alloc(new_size);
+ } else if (required_bytes > acc_alloc_size) {
+ Binary *prev_acc;
+ UWord new_size;
+
+ if (acc_alloc_size >= ACC_REALLOCATION_LIMIT) {
+ /* We skip reallocating once we hit a certain point; it often
+ * results in extra copying and we're very likely to overallocate
+ * on anything other than absurdly long byte/heapbin sequences. */
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ iol2v_expand_acc(state, extra);
+ return;
+ }
+
+ new_size = MAX(required_bytes, acc_alloc_size * 2);
+ prev_acc = state->acc;
+
+ state->acc = erts_bin_realloc(prev_acc, new_size);
+
+ if (prev_acc != state->acc) {
+ state->bytereds_spent += state->acc_size;
+ }
+ }
+
+ state->bytereds_spent += extra;
+}
+
+static int iol2v_append_byte_seq(iol2v_state_t *state, Eterm seq_start, Eterm *seq_end) {
+ Eterm lookahead, iterator;
+ Uint observed_bits;
+ SWord seq_length;
+ char *acc_data;
+
+ lookahead = seq_start;
+ seq_length = 0;
+
+ ASSERT(state->bytereds_available > state->bytereds_spent);
+
+ while (is_list(lookahead)) {
+ Eterm *cell = list_val(lookahead);
+
+ if (!is_small(CAR(cell))) {
+ break;
+ }
+
+ if (seq_length * 2 >= (state->bytereds_available - state->bytereds_spent)) {
+ break;
+ }
+
+ lookahead = CDR(cell);
+ seq_length += 1;
+ }
+
+ ASSERT(seq_length >= 1);
+
+ iol2v_expand_acc(state, seq_length);
+
+ /* Bump a few extra reductions to account for list traversal. */
+ state->bytereds_spent += seq_length;
+
+ acc_data = &(state->acc)->orig_bytes[state->acc_size];
+ state->acc_size += seq_length;
+
+ iterator = seq_start;
+ observed_bits = 0;
+
+ while (iterator != lookahead) {
+ Eterm *cell;
+ Uint byte;
+
+ cell = list_val(iterator);
+ iterator = CDR(cell);
+
+ byte = unsigned_val(CAR(cell));
+ observed_bits |= byte;
+
+ ASSERT(acc_data < &(state->acc)->orig_bytes[state->acc_size]);
+ *(acc_data++) = byte;
+ }
+
+ if (observed_bits > UCHAR_MAX) {
+ return 0;
+ }
+
+ ASSERT(acc_data == &(state->acc)->orig_bytes[state->acc_size]);
+ *seq_end = iterator;
+
+ return 1;
+}
+
+static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) {
+ int is_acc_small, is_bin_small;
+ UWord combined_size;
+ UWord binary_size;
+
+ Uint byte_offset, bit_offset, bit_size;
+ Eterm *parent_header;
+ Eterm parent_binary;
+ byte *binary_data;
+
+ ASSERT(state->bytereds_available > state->bytereds_spent);
+
+ ERTS_GET_REAL_BIN(bin_term, parent_binary, byte_offset, bit_offset, bit_size);
+ parent_header = binary_val(parent_binary);
+ binary_size = binary_size(bin_term);
+
+ if (bit_offset != 0 || bit_size != 0) {
+ return 0;
+ } else if (binary_size == 0) {
+ state->bytereds_spent += 1;
+ return 1;
+ }
+
+ is_acc_small = state->acc_size < IOL2V_SMALL_BIN_LIMIT;
+ is_bin_small = binary_size < IOL2V_SMALL_BIN_LIMIT;
+ combined_size = binary_size + state->acc_size;
+
+ if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin*)parent_header;
+
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+
+ binary_data = pb->bytes;
+ } else {
+ ErlHeapBin *hb = (ErlHeapBin*)parent_header;
+
+ ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
+ ASSERT(is_bin_small);
+
+ binary_data = &((unsigned char*)&hb->data)[byte_offset];
+ }
+
+ if (!is_bin_small && (state->acc_size == 0 || !is_acc_small)) {
+ /* Avoid combining if we encounter an acceptably large binary while the
+ * accumulator is either empty or large enough to be returned on its
+ * own. */
+ if (state->acc_size != 0) {
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ }
+
+ iol2v_enqueue_result(state, bin_term);
+ } else if (is_bin_small || combined_size < (IOL2V_SMALL_BIN_LIMIT * 2)) {
+ /* If the candidate is small or we can't split the combination in two,
+ * then just copy it into the accumulator. */
+ iol2v_expand_acc(state, binary_size);
+
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, binary_size);
+
+ state->acc_size += binary_size;
+ } else {
+ /* Otherwise, append enough data for the accumulator to be valid, and
+ * then return the rest as a sub-binary. */
+ UWord spill = IOL2V_SMALL_BIN_LIMIT - state->acc_size;
+ Eterm binary_tail;
+
+ iol2v_expand_acc(state, spill);
+
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, spill);
+
+ state->acc_size += spill;
+
+ binary_tail = iol2v_make_sub_bin(state, bin_term, spill,
+ binary_size - spill);
+
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ iol2v_enqueue_result(state, binary_tail);
+ }
+
+ return 1;
+}
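+
+/* A rough feel for the three branches in iol2v_append_binary() above,
+ * assuming for illustration that IOL2V_SMALL_BIN_LIMIT is 256 (the macro
+ * is defined earlier in this file):
+ *
+ *   - empty accumulator, 4096-byte refc binary: the binary is emitted
+ *     as-is and nothing is copied;
+ *   - 100-byte accumulator, 400-byte binary: 100 + 400 < 2 * 256, so the
+ *     binary is copied into the accumulator;
+ *   - 100-byte accumulator, 10000-byte binary: 156 bytes are copied to
+ *     fill the accumulator up to 256, the accumulator is emitted, and the
+ *     remaining 9844 bytes are emitted as a sub-binary.
+ */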
+
+static BIF_RETTYPE iol2v_yield(iol2v_state_t *state) {
+ if (is_nil(state->magic_reference)) {
+ iol2v_state_t *boxed_state;
+ Binary *magic_binary;
+ Eterm *hp;
+
+ magic_binary = erts_create_magic_binary_x(sizeof(*state),
+ &iol2v_state_destructor, ERTS_ALC_T_BINARY, 1);
+
+ boxed_state = ERTS_MAGIC_BIN_UNALIGNED_DATA(magic_binary);
+ sys_memcpy(boxed_state, state, sizeof(*state));
+
+ hp = HAlloc(boxed_state->process, ERTS_MAGIC_REF_THING_SIZE);
+ boxed_state->magic_reference =
+ erts_mk_magic_ref(&hp, &MSO(boxed_state->process), magic_binary);
+
+ state = boxed_state;
+ }
+
+ ERTS_BIF_YIELD1(bif_export[BIF_iolist_to_iovec_1],
+ state->process, state->magic_reference);
+}
+
+static BIF_RETTYPE iol2v_continue(iol2v_state_t *state) {
+ Eterm iterator;
+
+ DECLARE_ESTACK(s);
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+
+ state->bytereds_available =
+ ERTS_BIF_REDS_LEFT(state->process) * IOL2V_SMALL_BIN_LIMIT;
+ state->bytereds_spent = 0;
+
+ if (state->estack.start) {
+ ESTACK_RESTORE(s, &state->estack);
+ }
+
+ iterator = state->input_list;
+
+ for(;;) {
+ if (state->bytereds_spent >= state->bytereds_available) {
+ ESTACK_SAVE(s, &state->estack);
+ state->input_list = iterator;
+
+ return iol2v_yield(state);
+ }
+
+ while (is_list(iterator)) {
+ Eterm *cell;
+ Eterm head;
+
+ cell = list_val(iterator);
+ head = CAR(cell);
+
+ if (is_binary(head)) {
+ if (!iol2v_append_binary(state, head)) {
+ goto l_badarg;
+ }
+
+ iterator = CDR(cell);
+ } else if (is_small(head)) {
+ Eterm seq_end;
+
+ if (!iol2v_append_byte_seq(state, iterator, &seq_end)) {
+ goto l_badarg;
+ }
+
+ iterator = seq_end;
+ } else if (is_list(head) || is_nil(head)) {
+ Eterm tail = CDR(cell);
+
+ if (!is_nil(tail)) {
+ ESTACK_PUSH(s, tail);
+ }
+
+ state->bytereds_spent += 1;
+ iterator = head;
+ } else {
+ goto l_badarg;
+ }
+
+ if (state->bytereds_spent >= state->bytereds_available) {
+ ESTACK_SAVE(s, &state->estack);
+ state->input_list = iterator;
+
+ return iol2v_yield(state);
+ }
+ }
+
+ if (is_binary(iterator)) {
+ if (!iol2v_append_binary(state, iterator)) {
+ goto l_badarg;
+ }
+ } else if (!is_nil(iterator)) {
+ goto l_badarg;
+ }
+
+ if(ESTACK_ISEMPTY(s)) {
+ break;
+ }
+
+ iterator = ESTACK_POP(s);
+ }
+
+ if (state->acc_size != 0) {
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ }
+
+ BUMP_REDS(state->process, state->bytereds_spent / IOL2V_SMALL_BIN_LIMIT);
+
+ CLEAR_SAVED_ESTACK(&state->estack);
+ DESTROY_ESTACK(s);
+
+ BIF_RET(state->result_head);
+
+l_badarg:
+ CLEAR_SAVED_ESTACK(&state->estack);
+ DESTROY_ESTACK(s);
+
+ if (state->acc != NULL) {
+ erts_bin_free(state->acc);
+ state->acc = NULL;
+ }
+
+ BIF_ERROR(state->process, BADARG);
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_iovec, 1)
+
+BIF_RETTYPE iolist_to_iovec_1(BIF_ALIST_1) {
+ BIF_RETTYPE result;
+
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(NIL);
+ } else if (is_binary(BIF_ARG_1)) {
+ if (binary_size(BIF_ARG_1) != 0) {
+ Eterm *hp = HAlloc(BIF_P, 2);
+
+ BIF_RET(CONS(hp, BIF_ARG_1, NIL));
+ } else {
+ BIF_RET(NIL);
+ }
+ } else if (is_internal_magic_ref(BIF_ARG_1)) {
+ iol2v_state_t *state;
+ Binary *magic;
+
+ magic = erts_magic_ref2bin(BIF_ARG_1);
+
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != &iol2v_state_destructor) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ state = ERTS_MAGIC_BIN_UNALIGNED_DATA(magic);
+ result = iol2v_continue(state);
+ } else if (!is_list(BIF_ARG_1)) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ } else {
+ iol2v_state_t state;
+
+ iol2v_init(&state, BIF_P, BIF_ARG_1);
+
+ erts_set_gc_state(BIF_P, 0);
+
+ result = iol2v_continue(&state);
+ }
+
+ if (result != THE_NON_VALUE || BIF_P->freason != TRAP) {
+ erts_set_gc_state(BIF_P, 1);
+ }
+
+ BIF_RET(result);
+}
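
Note on the reduction accounting above: iol2v_continue() budgets its work in
bytes rather than reductions, converting the reductions left into a byte
allowance and charging them back when it yields or completes. Assuming, purely
for the sake of the numbers, an IOL2V_SMALL_BIN_LIMIT of 64 bytes (the macro is
defined earlier in erl_io_queue.c), a call arriving with 2000 reductions left
behaves roughly like this:

    /* illustrative numbers only */
    state->bytereds_available = 2000 * 64;             /* 128000-byte budget */
    /* ...and copying 6400 bytes before finishing costs: */
    BUMP_REDS(state->process, 6400 / 64);              /* 100 reductions     */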
diff --git a/erts/emulator/beam/erl_io_queue.h b/erts/emulator/beam/erl_io_queue.h
new file mode 100644
index 0000000000..51abe99510
--- /dev/null
+++ b/erts/emulator/beam/erl_io_queue.h
@@ -0,0 +1,201 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A queue used for storing binary data that should be
+ * passed to writev or similar functions. Used by both
+ * the NIF and driver APIs.
+ *
+ * Author: Lukas Larsson
+ */
+
+#ifndef ERL_IO_QUEUE_H__TYPES__
+#define ERL_IO_QUEUE_H__TYPES__
+
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
+#include "erl_nif.h"
+
+#ifdef DEBUG
+#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))
+#else
+#define MAX_SYSIOVEC_IOVLEN (1ull << (sizeof(((SysIOVec*)0)->iov_len) * 8 - 1))
+#endif
+
+#define ERTS_SMALL_IO_QUEUE 5
+
+typedef union {
+ ErlDrvBinary driver;
+ Binary nif;
+} ErtsIOQBinary;
+
+typedef struct {
+ int vsize; /* number of elements in iov */
+ Uint size; /* total size in bytes */
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+} ErtsIOVecCommon;
+
+typedef union {
+ ErtsIOVecCommon common;
+ ErlIOVec driver;
+ ErlNifIOVec nif;
+} ErtsIOVec;
+
+/* head/tail represent the data currently in the queue,
+ * start/end represent the edges of the allocated queue, and
+ * the small arrays are used when the number of iovec elements is < ERTS_SMALL_IO_QUEUE
+ */
+typedef struct erts_io_queue {
+ ErtsAlcType_t alct;
+ int driver;
+ Uint size; /* total size in bytes */
+
+ SysIOVec* v_start;
+ SysIOVec* v_end;
+ SysIOVec* v_head;
+ SysIOVec* v_tail;
+ SysIOVec v_small[ERTS_SMALL_IO_QUEUE];
+
+ ErtsIOQBinary **b_start;
+ ErtsIOQBinary **b_end;
+ ErtsIOQBinary **b_head;
+ ErtsIOQBinary **b_tail;
+ ErtsIOQBinary *b_small[ERTS_SMALL_IO_QUEUE];
+
+} ErtsIOQueue;
+
+#endif /* ERL_IO_QUEUE_H__TYPES__ */
+
+#if !defined(ERL_IO_QUEUE_H) && !defined(ERTS_IO_QUEUE_TYPES_ONLY__)
+#define ERL_IO_QUEUE_H
+
+#include "erl_binary.h"
+#include "erl_bits.h"
+
+void erts_ioq_init(ErtsIOQueue *q, ErtsAlcType_t alct, int driver);
+void erts_ioq_clear(ErtsIOQueue *q);
+Uint erts_ioq_size(ErtsIOQueue *q);
+int erts_ioq_enqv(ErtsIOQueue *q, ErtsIOVec *vec, Uint skip);
+int erts_ioq_pushqv(ErtsIOQueue *q, ErtsIOVec *vec, Uint skip);
+int erts_ioq_deq(ErtsIOQueue *q, Uint size);
+Uint erts_ioq_peekqv(ErtsIOQueue *q, ErtsIOVec *ev);
+SysIOVec *erts_ioq_peekq(ErtsIOQueue *q, int *vlenp);
+Uint erts_ioq_sizeq(ErtsIOQueue *q);
+
+int erts_ioq_iolist_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ Uint* total_size, Uint blimit);
+int erts_ioq_iolist_to_vec(Eterm obj, SysIOVec* iov,
+ ErtsIOQBinary** binv, ErtsIOQBinary* cbin,
+ Uint bin_limit, int driver_binary);
+
+ERTS_GLB_INLINE
+int erts_ioq_iodata_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ Uint* total_size, Uint blimit);
+ERTS_GLB_INLINE
+int erts_ioq_iodata_to_vec(Eterm obj, SysIOVec* iov,
+ ErtsIOQBinary** binv, ErtsIOQBinary* cbin,
+ Uint bin_limit, int driver_binary);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+int erts_ioq_iodata_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ Uint* total_size, Uint blimit) {
+ if (is_binary(obj)) {
+ /* We optimize for when we get a procbin without a bit-offset
+ * that fits in one iov slot
+ */
+ Eterm real_bin;
+ byte bitoffs;
+ byte bitsize;
+ ERTS_DECLARE_DUMMY(Uint offset);
+ Uint size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ if (size < MAX_SYSIOVEC_IOVLEN && bitoffs == 0 && bitsize == 0) {
+ *vsize = 1;
+ *pvsize = 1;
+ if (thing_subtag(*binary_val(real_bin)) == REFC_BINARY_SUBTAG) {
+ *csize = 0;
+ *pcsize = 0;
+ } else {
+ *csize = size;
+ *pcsize = size;
+ }
+ *total_size = size;
+ return 0;
+ }
+ }
+
+ return erts_ioq_iolist_vec_len(obj, vsize, csize,
+ pvsize, pcsize, total_size, blimit);
+}
+
+ERTS_GLB_INLINE
+int erts_ioq_iodata_to_vec(Eterm obj,
+ SysIOVec *iov,
+ ErtsIOQBinary **binv,
+ ErtsIOQBinary *cbin,
+ Uint bin_limit,
+ int driver)
+{
+ if (is_binary(obj)) {
+ Eterm real_bin;
+ byte bitoffs;
+ byte bitsize;
+ Uint offset;
+ Uint size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ if (size < MAX_SYSIOVEC_IOVLEN && bitoffs == 0 && bitsize == 0) {
+ Eterm *bptr = binary_val(real_bin);
+ if (thing_subtag(*bptr) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin *)bptr;
+ if (pb->flags)
+ erts_emasculate_writable_binary(pb);
+ iov[0].iov_base = pb->bytes+offset;
+ iov[0].iov_len = size;
+ if (driver)
+ binv[0] = (ErtsIOQBinary*)Binary2ErlDrvBinary(pb->val);
+ else
+ binv[0] = (ErtsIOQBinary*)pb->val;
+ return 1;
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin *)bptr;
+ byte *buf = driver ? (byte*)cbin->driver.orig_bytes :
+ (byte*)cbin->nif.orig_bytes;
+ copy_binary_to_buffer(buf, 0, ((byte *) hb->data)+offset, 0, 8*size);
+ iov[0].iov_base = buf;
+ iov[0].iov_len = size;
+ binv[0] = cbin;
+ return 1;
+ }
+ }
+ }
+ return erts_ioq_iolist_to_vec(obj, iov, binv, cbin, bin_limit, driver);
+}
+
+#endif
+
+#endif /* ERL_IO_QUEUE_H */
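
The header above only declares the queue API; a minimal usage sketch for a
driver-side caller might look as follows (the allocator type, the ErlIOVec
named ev, and bytes_written are assumptions made for illustration, and error
handling is omitted):

    ErtsIOQueue q;
    SysIOVec *iov;
    int vlen;

    erts_ioq_init(&q, ERTS_ALC_T_IOQ /* assumed allocator type */, 1 /* driver */);
    erts_ioq_enqv(&q, (ErtsIOVec*)ev, 0);  /* append ev, skipping 0 bytes    */

    iov = erts_ioq_peekq(&q, &vlen);       /* borrow the head for writev()   */
    /* ... writev(fd, iov, vlen) -> bytes_written ... */
    erts_ioq_deq(&q, bytes_written);       /* drop what was actually written */

    erts_ioq_clear(&q);                    /* reset the queue                */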
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index f270d8baef..189c88ac4a 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -104,7 +104,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "db_tab", "address" },
{ "proc_status", "pid" },
{ "proc_trace", "pid" },
- { "ports_snapshot", NULL },
{ "db_tab_fix", "address" },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
@@ -161,6 +160,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
+#ifdef ERTS_SMP
+ { "pollsets_lock", NULL },
+#endif
{ "alcu_allocator", "index" },
{ "mseg", NULL },
#ifdef ERTS_SMP
@@ -169,11 +171,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "xports_list_pre_alloc_lock", "address" },
{ "inet_buffer_stack_lock", NULL },
{ "system_block", NULL },
- { "timeofday", NULL },
{ "get_time", NULL },
{ "get_corrected_time", NULL },
+ { "runtime", NULL },
{ "breakpoints", NULL },
- { "pollsets_lock", NULL },
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
@@ -199,41 +200,20 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#define ERTS_LOCK_ORDER_SIZE \
(sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))
-#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
- (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
- && ((LCK_FLG) \
- & ERTS_LC_FLG_LT_ALL \
- & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
+ (((LCKD_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_SPINLOCK \
+ && \
+ ((LCK_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) != ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
static __decl_noreturn void __noreturn lc_abort(void);
-static char *
-lock_type(Uint16 flags)
+static const char *rw_op_str(erts_lock_options_t options)
{
- switch (flags & ERTS_LC_FLG_LT_ALL) {
- case ERTS_LC_FLG_LT_SPINLOCK: return "[spinlock]";
- case ERTS_LC_FLG_LT_RWSPINLOCK: return "[rw(spin)lock]";
- case ERTS_LC_FLG_LT_MUTEX: return "[mutex]";
- case ERTS_LC_FLG_LT_RWMUTEX: return "[rwmutex]";
- case ERTS_LC_FLG_LT_PROCLOCK: return "[proclock]";
- default: return "";
+ if(options == ERTS_LOCK_OPTIONS_WRITE) {
+ ERTS_INTERNAL_ERROR("Only write flag present");
}
-}
-static char *
-rw_op_str(Uint16 flags)
-{
- switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
- case ERTS_LC_FLG_LO_READ_WRITE:
- return " (rw)";
- case ERTS_LC_FLG_LO_READ:
- return " (r)";
- case ERTS_LC_FLG_LO_WRITE:
- ERTS_INTERNAL_ERROR("Only write flag present");
- default:
- break;
- }
- return "";
+ return erts_lock_options_get_short_desc(options);
}
typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
@@ -244,7 +224,8 @@ struct erts_lc_locked_lock_t_ {
Sint16 id;
char *file;
unsigned int line;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
};
typedef struct {
@@ -431,7 +412,7 @@ make_my_locked_locks(void)
}
static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+new_locked_lock(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
@@ -441,12 +422,13 @@ new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
l_lck->extra = lck->extra;
l_lck->file = file;
l_lck->line = line;
- l_lck->flags = lck->flags | op_flags;
+ l_lck->flags = lck->flags;
+ l_lck->taken_options = options;
return l_lck;
}
static void
-raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags,
char* file, unsigned int line, char *suffix)
{
char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
@@ -458,16 +440,16 @@ raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
erts_fprintf(stderr,"%T",extra);
- erts_fprintf(stderr,"%s",lock_type(flags));
+ erts_fprintf(stderr,"[%s]",erts_lock_flags_get_type_name(flags));
if (file)
erts_fprintf(stderr,"(%s:%d)",file,line);
- erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+ erts_fprintf(stderr,"'(%s)%s",rw_op_str(flags),suffix);
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+print_lock2(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags, char *suffix)
{
raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
@@ -522,9 +504,9 @@ uninitialized_lock(void)
static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
+ erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -532,9 +514,9 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
+ erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -745,84 +727,128 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
+static int compare_locked_by_id(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+{
+ if(locked_lock->id < comparand->id) {
+ return -1;
+ } else if(locked_lock->id > comparand->id) {
+ return 1;
+ }
-static int
-find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
+ return 0;
+}
+
+static int compare_locked_by_id_extra(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
+ int order = compare_locked_by_id(locked_lock, comparand);
+
+ if(order) {
+ return order;
+ } else if(locked_lock->extra < comparand->extra) {
+ return -1;
+ } else if(locked_lock->extra > comparand->extra) {
+ return 1;
+ }
- if (l_lck) {
- if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & lck->flags) == lck->flags)
- return 1;
- return 0;
- }
- else if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra < lck->extra)) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id > lck->id
- || (l_lck->id == lck->id
- && l_lck->extra >= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra <= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
+ return 0;
+}
+
+typedef int (*locked_compare_func)(erts_lc_locked_lock_t *, erts_lc_lock_t *);
+
+/* Searches through a list of taken locks, stopping when it reaches an entry
+ * whose order relative to the search template is the opposite of the order
+ * seen at the start of the search. (*closest_neighbor) is set either to the
+ * exact match or to the entry closest to it in the sort order. */
+static int search_locked_list(locked_compare_func compare,
+ erts_lc_locked_lock_t *locked_locks,
+ erts_lc_lock_t *search_template,
+ erts_lc_locked_lock_t **closest_neighbor)
+{
+ erts_lc_locked_lock_t *iterator = locked_locks;
+
+ (*closest_neighbor) = iterator;
+
+ if(iterator) {
+ int relative_order = compare(iterator, search_template);
+
+ if(relative_order < 0) {
+ while((iterator = iterator->next) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order >= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ } else if(relative_order > 0) {
+ while((iterator = iterator->prev) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order <= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ }
+
+ return relative_order == 0;
}
+
return 0;
}
+/* Searches for a lock in the given list that matches search_template, and sets
+ * (*locked_locks) to the closest lock in the sort order. */
static int
-find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
-{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
-
- if (l_lck) {
- if (l_lck->id == id)
- return 1;
- else if (l_lck->id < id) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id >= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id <= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
+find_lock(erts_lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ int found_lock;
+
+ found_lock = search_locked_list(compare_locked_by_id_extra,
+ (*locked_locks),
+ search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ if(found_lock) {
+ erts_lock_options_t relevant_options;
+ erts_lock_flags_t relevant_flags;
+
+ /* We only care about the options and flags that are set in the
+ * template. */
+ relevant_options = (closest_neighbor->taken_options & search_template->taken_options);
+ relevant_flags = (closest_neighbor->flags & search_template->flags);
+
+ return search_template->taken_options == relevant_options &&
+ search_template->flags == relevant_flags;
}
+
return 0;
}
+/* Searches for a lock in the given list by id, and sets (*locked_locks) to the
+ * closest lock in the sort order. */
+static int
+find_id(erts_lc_locked_lock_t **locked_locks, Sint16 id)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ erts_lc_lock_t search_template;
+ int found_lock;
+
+ search_template.id = id;
+
+ found_lock = search_locked_list(compare_locked_by_id,
+ (*locked_locks),
+ &search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ return found_lock;
+}
+
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
@@ -918,17 +944,17 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
void
-erts_lc_check_no_locked_of_type(Uint16 flags)
+erts_lc_check_no_locked_of_type(erts_lock_flags_t type)
{
erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
if (l_lcks) {
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
- if (l_lck->flags & flags) {
+ if ((l_lck->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == type) {
erts_fprintf(stderr,
"Locked lock of type %s found which isn't "
"allowed here!\n",
- lock_type(l_lck->flags));
+ erts_lock_flags_get_type_name(l_lck->flags));
print_curr_locks(l_lcks);
lc_abort();
}
@@ -937,7 +963,7 @@ erts_lc_check_no_locked_of_type(Uint16 flags)
}
int
-erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
return 0;
@@ -986,7 +1012,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
break;
}
}
@@ -1008,7 +1034,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1021,7 +1047,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
+ l_lck = locked ? new_locked_lock(lck, options, file, line) : NULL;
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1039,7 +1065,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
if (locked) {
l_lck->next = tl_lck->next;
l_lck->prev = tl_lck;
@@ -1062,14 +1088,14 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
if (!find_lock(&l_lck, lck))
required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->required.last) {
ASSERT(!l_lcks->required.first);
l_lck->next = l_lck->prev = NULL;
@@ -1109,7 +1135,7 @@ void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
}
}
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
@@ -1137,7 +1163,7 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
lc_free((void *) l_lck);
}
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1150,7 +1176,7 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1166,12 +1192,12 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
l_lcks->locked.last = l_lck;
}
else if (l_lcks->locked.last->id == lck->id && l_lcks->locked.last->extra == lck->extra)
- lock_twice("Locking", l_lcks, lck, op_flags);
+ lock_twice("Locking", l_lcks, lck, options);
else
lock_order_violation(l_lcks, lck);
}
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1192,8 +1218,8 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
- unlock_op_mismatch(l_lcks, lck, op_flags);
+ if ((l_lck->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
+ unlock_op_mismatch(l_lcks, lck, options);
if (l_lck->prev)
l_lck->prev->next = l_lck->next;
else
@@ -1210,7 +1236,7 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
unlock_of_not_locked(l_lcks, lck);
}
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1274,23 +1300,25 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
}
void
-erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
+erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
void
-erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
+erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = extra;
ASSERT(is_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
@@ -1304,6 +1332,7 @@ erts_lc_destroy_lock(erts_lc_lock_t *lck)
lck->id = -1;
lck->extra = THE_NON_VALUE;
lck->flags = 0;
+ lck->taken_options = 0;
}
void
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 18296d1fec..8c754a8dfa 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -36,6 +36,8 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#include "erl_lock_flags.h"
+
#ifndef ERTS_ENABLE_LOCK_POSITION
/* Enable in order for _x variants of mtx functions to be used. */
#define ERTS_ENABLE_LOCK_POSITION 1
@@ -44,36 +46,14 @@
typedef struct {
int inited;
Sint16 id;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
UWord extra;
} erts_lc_lock_t;
#define ERTS_LC_INITITALIZED 0x7f7f7f7f
-
-#define ERTS_LC_FLG_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LC_FLG_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LC_FLG_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LC_FLG_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LC_FLG_LT_PROCLOCK (((Uint16) 1) << 4)
-
-#define ERTS_LC_FLG_LO_READ (((Uint16) 1) << 5)
-#define ERTS_LC_FLG_LO_WRITE (((Uint16) 1) << 6)
-
-#define ERTS_LC_FLG_LO_READ_WRITE (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-#define ERTS_LC_FLG_LT_ALL (ERTS_LC_FLG_LT_SPINLOCK \
- | ERTS_LC_FLG_LT_RWSPINLOCK \
- | ERTS_LC_FLG_LT_MUTEX \
- | ERTS_LC_FLG_LT_RWMUTEX \
- | ERTS_LC_FLG_LT_PROCLOCK)
-
-#define ERTS_LC_FLG_LO_ALL (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-
-#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), (X)}
+#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), 0, (X)}
void erts_lc_init(void);
void erts_lc_late_init(void);
@@ -83,31 +63,31 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
-void erts_lc_check_no_locked_of_type(Uint16 flags);
-int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_check_no_locked_of_type(erts_lock_flags_t flags);
+int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
char* file, unsigned int line);
void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
-void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
-void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra);
+void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags);
+void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra);
void erts_lc_destroy_lock(erts_lc_lock_t *lck);
void erts_lc_fail(char *fmt, ...);
int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
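
With these changes the checker takes the lock type from erts_lock_flags_t and
the read/write side from erts_lock_options_t. A minimal sketch of the intended
call pattern follows; ERTS_LOCK_FLAGS_TYPE_MUTEX and ERTS_LOCK_OPTIONS_READ are
assumed to come from erl_lock_flags.h (only the spinlock type and the
WRITE/RDWR options are visible in this diff), and the lock name must be one of
the entries in erts_lock_order[]:

    erts_lc_lock_t lc;

    erts_lc_init_lock(&lc, "db_tab", ERTS_LOCK_FLAGS_TYPE_MUTEX);

    erts_lc_lock_x(&lc, __FILE__, __LINE__);   /* record taking the lock */
    erts_lc_unlock(&lc);                       /* ...and releasing it    */

    /* rwlock-style locks pass the taken options explicitly: */
    erts_lc_lock_flg_x(&lc, ERTS_LOCK_OPTIONS_READ, __FILE__, __LINE__);
    erts_lc_unlock_flg(&lc, ERTS_LOCK_OPTIONS_READ);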
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 678bc43f04..d2e8f47d59 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -18,51 +18,37 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- */
-
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-/* Needed for VxWorks va_arg */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
#include "sys.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "global.h"
#include "erl_lock_count.h"
-#include "ethread.h"
-#include "erl_term.h"
-#include "atom.h"
-#include <stdio.h>
-
-/* globals, dont access these without locks or blocks */
+#include "erl_thr_progress.h"
-ethr_mutex lcnt_data_lock;
-erts_lcnt_data_t *erts_lcnt_data;
-Uint16 erts_lcnt_rt_options;
-erts_lcnt_time_t timer_start;
-const char *str_undefined = "undefined";
+#include "erl_node_tables.h"
+#include "erl_alloc_util.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
+#include "erl_db.h"
-static ethr_tsd_key lcnt_thr_data_key;
-static int lcnt_n_thr;
-static erts_lcnt_thread_data_t *lcnt_thread_data[2048];
+#define LCNT_MAX_CARRIER_ENTRIES 255
-/* local functions */
+/* - Locals that are shared with the header implementation - */
-static ERTS_INLINE void lcnt_lock(void) {
- ethr_mutex_lock(&lcnt_data_lock);
-}
+#ifdef DEBUG
+int lcnt_initialization_completed__;
+#endif
-static ERTS_INLINE void lcnt_unlock(void) {
- ethr_mutex_unlock(&lcnt_data_lock);
-}
+erts_lock_flags_t lcnt_category_mask__;
+ethr_tsd_key lcnt_thr_data_key__;
-const int log2_tab64[64] = {
+const int lcnt_log2_tab64__[64] = {
63, 0, 58, 1, 59, 47, 53, 2,
60, 39, 48, 27, 54, 33, 42, 3,
61, 51, 37, 40, 49, 18, 28, 20,
@@ -72,635 +58,624 @@ const int log2_tab64[64] = {
56, 45, 25, 31, 35, 16, 9, 12,
44, 24, 15, 8, 23, 7, 6, 5};
-static ERTS_INLINE int lcnt_log2(Uint64 v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
- return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
-}
-
-static char* lcnt_lock_type(Uint16 flag) {
- switch(flag & ERTS_LCNT_LT_ALL) {
- case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
- case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
- case ERTS_LCNT_LT_MUTEX: return "mutex";
- case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
- case ERTS_LCNT_LT_PROCLOCK: return "proclock";
- default: return "";
- }
-}
+/* - Local variables - */
-static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
- ethr_atomic_set(&stats->tries, 0);
- ethr_atomic_set(&stats->colls, 0);
- stats->timer.s = 0;
- stats->timer.ns = 0;
- stats->timer_n = 0;
- stats->file = (char *)str_undefined;
- stats->line = 0;
- sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
-}
+typedef struct lcnt_static_lock_ref_ {
+ erts_lcnt_ref_t *reference;
-static void lcnt_time(erts_lcnt_time_t *time) {
- /*
- * erts_sys_hrtime() is the highest resolution
- * we could find, it may or may not be monotonic...
- */
- ErtsMonotonicTime mtime = erts_sys_hrtime();
- time->s = (unsigned long) (mtime / 1000000000LL);
- time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
-}
+ erts_lock_flags_t flags;
+ const char *name;
+ Eterm id;
-static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
- long ds;
- long dns;
+ struct lcnt_static_lock_ref_ *next;
+} lcnt_static_lock_ref_t;
- ds = t1->s - t0->s;
- dns = t1->ns - t0->ns;
+static ethr_atomic_t lcnt_static_lock_registry;
- /* the difference should not be able to get bigger than 1 sec in ns*/
+static erts_lcnt_lock_info_list_t lcnt_current_lock_list;
+static erts_lcnt_lock_info_list_t lcnt_deleted_lock_list;
- if (dns < 0) {
- ds -= 1;
- dns += 1000000000LL;
- }
+static erts_lcnt_time_t lcnt_timer_start;
- ASSERT(ds >= 0);
+static int lcnt_preserve_info;
- d->s = ds;
- d->ns = dns;
-}
+/* local functions */
+
+static void lcnt_clear_stats(erts_lcnt_lock_info_t *info) {
+ size_t i;
+
+ for(i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
-/* difference d must be non-negative */
+ sys_memzero(&stats->wait_time_histogram, sizeof(stats->wait_time_histogram));
-static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- t->s += d->s;
- t->ns += d->ns;
+ stats->total_time_waited.s = 0;
+ stats->total_time_waited.ns = 0;
- t->s += t->ns / 1000000000LL;
- t->ns = t->ns % 1000000000LL;
+ stats->times_waited = 0;
+
+ stats->file = NULL;
+ stats->line = 0;
+
+ ethr_atomic_set(&stats->attempts, 0);
+ ethr_atomic_set(&stats->collisions, 0);
+ }
+
+ info->location_count = 1;
}
-static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
- erts_lcnt_thread_data_t *eltd;
+static lcnt_thread_data_t__ *lcnt_thread_data_alloc(void) {
+ lcnt_thread_data_t__ *eltd =
+ (lcnt_thread_data_t__*)malloc(sizeof(lcnt_thread_data_t__));
- eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
- if (!eltd) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ if(!eltd) {
+ ERTS_INTERNAL_ERROR("Failed to allocate lcnt thread data.");
}
+
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
- eltd->id = lcnt_n_thr++;
- /* set thread data to array */
- lcnt_thread_data[eltd->id] = eltd;
-
return eltd;
}
-static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
- return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
-}
+/* - List operations -
+ *
+ * Info entries are kept in a doubly linked list where each entry is locked
+ * with its neighbors rather than a global lock. Deletion is rather quick, but
+ * insertion is still serial since the head becomes a de facto global lock.
+ *
+ * We rely on ad-hoc spinlocks to avoid "recursing" into this module. */
-/* debug */
+#define LCNT_SPINLOCK_YIELD_ITERATIONS 50
-#if 0
-static char* lock_opt(Uint16 flag) {
- if ((flag & ERTS_LCNT_LO_WRITE) && (flag & ERTS_LCNT_LO_READ)) return "rw";
- if (flag & ERTS_LCNT_LO_READ ) return "r ";
- if (flag & ERTS_LCNT_LO_WRITE) return " w";
- return "--";
-}
+#define LCNT_SPINLOCK_HELPER_INIT \
+ Uint failed_spin_count = 0;
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
- erts_aint_t w_state, r_state;
- char *type;
+#define LCNT_SPINLOCK_HELPER_YIELD \
+ do { \
+ failed_spin_count++; \
+ if(!(failed_spin_count % LCNT_SPINLOCK_YIELD_ITERATIONS)) { \
+ erts_thr_yield(); \
+ } else { \
+ ERTS_SPIN_BODY; \
+ } \
+ } while(0)
- if (strcmp(lock->name, "run_queue") != 0) return;
- type = lcnt_lock_type(lock->flag);
- r_state = ethr_atomic_read(&lock->r_state);
- w_state = ethr_atomic_read(&lock->w_state);
+static void lcnt_unlock_list_entry(erts_lcnt_lock_info_t *info) {
+ ethr_atomic32_set_relb(&info->lock, 0);
+}
- if (lock->flag & flag) {
- erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- type,
- lock->id);
- }
+static int lcnt_try_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ return ethr_atomic32_cmpxchg_acqb(&info->lock, 1, 0) == 0;
}
-#endif
-static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- unsigned int i;
- erts_lcnt_lock_stats_t *stats = NULL;
+static void lcnt_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
- }
+ while(!lcnt_try_lock_list_entry(info)) {
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- return &lock->stats[0];
}
-static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
- int idx;
- unsigned long r;
+static void lcnt_lock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
- idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
- } else {
- r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
- if (r) idx = lcnt_log2(r);
- else idx = 0;
+ for(;;) {
+ if(!lcnt_try_lock_list_entry(info))
+ goto retry_after_entry_failed;
+ if(!lcnt_try_lock_list_entry(info->next))
+ goto retry_after_next_failed;
+ if(!lcnt_try_lock_list_entry(info->prev))
+ goto retry_after_prev_failed;
+
+ return;
+
+ retry_after_prev_failed:
+ lcnt_unlock_list_entry(info->next);
+ retry_after_next_failed:
+ lcnt_unlock_list_entry(info);
+ retry_after_entry_failed:
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
- erts_lcnt_time_t *time_wait) {
+static void lcnt_unlock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ lcnt_unlock_list_entry(info->prev);
+ lcnt_unlock_list_entry(info->next);
+ lcnt_unlock_list_entry(info);
+}
- ethr_atomic_inc(&stats->tries);
+static void lcnt_insert_list_entry(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t *info) {
+ erts_lcnt_lock_info_t *next, *prev;
- if (lock_in_conflict)
- ethr_atomic_inc(&stats->colls);
+ prev = &list->head;
- if (time_wait) {
- lcnt_time_add(&(stats->timer), time_wait);
- stats->timer_n++;
- lcnt_update_stats_hist(&stats->hist,time_wait);
- }
-}
+ lcnt_lock_list_entry(prev);
-/* interface */
+ next = prev->next;
-void erts_lcnt_init() {
- erts_lcnt_thread_data_t *eltd = NULL;
+ lcnt_lock_list_entry(next);
- /* init lock */
- if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
+ info->next = next;
+ info->prev = prev;
- /* init tsd */
- lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key, "lcnt_data");
+ prev->next = info;
+ next->prev = info;
- lcnt_lock();
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
+}
+
+static void lcnt_insert_list_carrier(erts_lcnt_lock_info_list_t *list,
+ erts_lcnt_lock_info_carrier_t *carrier) {
+ erts_lcnt_lock_info_t *next, *prev;
+ size_t i;
- erts_lcnt_rt_options = ERTS_LCNT_OPT_LOCATION | ERTS_LCNT_OPT_PROCLOCK;
- eltd = lcnt_thread_data_alloc();
- ethr_tsd_set(lcnt_thr_data_key, eltd);
+ for(i = 0; i < carrier->entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[i];
- /* init lcnt structure */
- erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
- if (!erts_lcnt_data) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ info->prev = &carrier->entries[i - 1];
+ info->next = &carrier->entries[i + 1];
}
- erts_lcnt_data->current_locks = erts_lcnt_list_init();
- erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
- lcnt_unlock();
+ prev = &list->head;
+
+ lcnt_lock_list_entry(prev);
+
+ next = prev->next;
+
+ lcnt_lock_list_entry(next);
+
+ next->prev = &carrier->entries[carrier->entry_count - 1];
+ carrier->entries[carrier->entry_count - 1].next = next;
+ prev->next = &carrier->entries[0];
+ carrier->entries[0].prev = prev;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
-void erts_lcnt_late_init() {
- /* set start timer and zero statistics */
- erts_lcnt_clear_counters();
- erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+static void lcnt_init_list(erts_lcnt_lock_info_list_t *list) {
+ /* Ensure that ref_count operations explode when touching the sentinels in
+ * DEBUG mode. */
+ ethr_atomic_init(&(list->head.ref_count), -1);
+ ethr_atomic_init(&(list->tail.ref_count), -1);
+
+ ethr_atomic32_init(&(list->head.lock), 0);
+ (list->head).next = &list->tail;
+ (list->head).prev = &list->tail;
+
+ ethr_atomic32_init(&(list->tail.lock), 0);
+ (list->tail).next = &list->head;
+ (list->tail).prev = &list->head;
}
-/* list operations */
+/* - Carrier operations - */
-/* BEGIN ASSUMPTION: lcnt_data_lock taken */
+int lcnt_thr_progress_unmanaged_delay__(void) {
+ return erts_thr_progress_unmanaged_delay();
+}
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
- erts_lcnt_lock_list_t *list;
+void lcnt_thr_progress_unmanaged_continue__(int handle) {
+ return erts_thr_progress_unmanaged_continue(handle);
+}
- list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
- if (!list) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
- return list;
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
+ erts_free(ERTS_ALC_T_LCNT_CARRIER, (void*)carrier);
}
-static void lcnt_list_free(erts_lcnt_lock_t *head) {
- erts_lcnt_lock_t *lock, *next;
+static void lcnt_thr_prg_cleanup_carrier(void *data) {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t entry_count, i;
+
+ /* carrier->entry_count will be replaced with garbage if it's deallocated
+ * on the final iteration, so we'll tuck it away to get a clean exit. */
+ entry_count = carrier->entry_count;
- lock = head;
+ for(i = 0; i < entry_count; i++) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) >= (entry_count - i));
- while(lock != NULL) {
- next = lock->next;
- free(lock);
- lock = next;
+ erts_lcnt_release_lock_info(&carrier->entries[i]);
}
}
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_t *tail = NULL;
+static void lcnt_schedule_carrier_cleanup(void *data) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* We can't issue cleanup jobs on anything other than normal schedulers, so
+ * we move to the first scheduler if required. */
- tail = list->tail;
- if (tail) {
- tail->next = lock;
- lock->prev = tail;
+ if(!esdp || esdp->type != ERTS_SCHED_NORMAL) {
+ erts_schedule_misc_aux_work(1, &lcnt_schedule_carrier_cleanup, data);
} else {
- list->head = lock;
- lock->prev = NULL;
- ASSERT(!lock->next);
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t carrier_size;
+
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * carrier->entry_count;
+
+ erts_schedule_thr_prgr_later_cleanup_op(&lcnt_thr_prg_cleanup_carrier,
+ data, (ErtsThrPrgrLaterOp*)&carrier->release_entries, carrier_size);
}
- lock->next = NULL;
- list->tail = lock;
+}
- list->n++;
+static void lcnt_info_deallocate(erts_lcnt_lock_info_t *info) {
+ lcnt_release_carrier__(info->carrier);
}
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- if (lock->next) lock->next->prev = lock->prev;
- if (lock->prev) lock->prev->next = lock->next;
- if (list->head == lock) list->head = lock->next;
- if (list->tail == lock) list->tail = lock->prev;
+static void lcnt_info_dispose(erts_lcnt_lock_info_t *info) {
+ ASSERT(ethr_atomic_read(&info->ref_count) == 0);
+
+ if(lcnt_preserve_info) {
+ ethr_atomic_set(&info->ref_count, 1);
+
+ /* Move straight to deallocation the next time around. */
+ info->dispose = &lcnt_info_deallocate;
- lock->prev = NULL;
- lock->next = NULL;
- list->n--;
+ lcnt_insert_list_entry(&lcnt_deleted_lock_list, info);
+ } else {
+ lcnt_info_deallocate(info);
+ }
}
-/* END ASSUMPTION: lcnt_data_lock taken */
+static void lcnt_lock_info_init_helper(erts_lcnt_lock_info_t *info) {
+ ethr_atomic_init(&info->ref_count, 1);
+ ethr_atomic32_init(&info->lock, 0);
+
+ ethr_atomic_init(&info->r_state, 0);
+ ethr_atomic_init(&info->w_state, 0);
-/* lock operations */
+ info->dispose = &lcnt_info_dispose;
-/* interface to erl_threads.h */
-/* only lock on init and destroy, all others should use atomics */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, NIL);
+ lcnt_clear_stats(info);
}
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
- int i;
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int entry_count) {
+ erts_lcnt_lock_info_carrier_t *result;
+ size_t carrier_size, i;
- if (flag & ERTS_LCNT_LT_DISABLE) {
- ERTS_LCNT_CLEAR_FLAG(lock);
- return;
- }
+ ASSERT(entry_count > 0 && entry_count <= LCNT_MAX_CARRIER_ENTRIES);
+ ASSERT(lcnt_initialization_completed__);
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = flag;
- lock->name = name;
- lock->id = id;
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * entry_count;
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
+ result = (erts_lcnt_lock_info_carrier_t*)erts_alloc(ERTS_ALC_T_LCNT_CARRIER, carrier_size);
+ result->entry_count = entry_count;
- lock->n_stats = 1;
+ ethr_atomic_init(&result->ref_count, entry_count);
- for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- lcnt_clear_stats(&lock->stats[i]);
- }
+ for(i = 0; i < entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &result->entries[i];
- lcnt_lock();
- erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
- lcnt_unlock();
-}
+ lcnt_lock_info_init_helper(info);
-/* init empty, instead of zero struct
- * used by process locks probes
- */
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock) {
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = 0;
- lock->name = NULL;
- lock->id = NIL;
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
- lock->n_stats = 0;
- sys_memzero(lock->stats, sizeof(lock->stats));
-}
-/* destroy lock */
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- lcnt_lock();
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
- erts_lcnt_lock_t *deleted_lock;
- /* copy structure and insert the copy */
- deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
- if (!deleted_lock) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
- deleted_lock->next = NULL;
- deleted_lock->prev = NULL;
- erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+ info->carrier = result;
}
- /* delete original */
- erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
- ERTS_LCNT_CLEAR_FLAG(lock);
- lcnt_unlock();
+ return result;
}
-/* lock */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t swapped_carrier;
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- erts_aint_t r_state = 0, w_state = 0;
- erts_lcnt_thread_data_t *eltd;
+#ifdef DEBUG
+ int i;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ /* Verify that all locks share the same categories/static property; all
+ * other flags are fair game. */
+ for(i = 1; i < carrier->entry_count; i++) {
+ const erts_lock_flags_t SIGNIFICANT_DIFF_MASK =
+ ERTS_LOCK_FLAGS_MASK_CATEGORY | ERTS_LOCK_FLAGS_PROPERTY_STATIC;
- eltd = lcnt_get_thread_data();
- ASSERT(eltd);
+ erts_lcnt_lock_info_t *previous, *current;
- w_state = ethr_atomic_read(&lock->w_state);
+ previous = &carrier->entries[i - 1];
+ current = &carrier->entries[i];
- if (option & ERTS_LCNT_LO_WRITE) {
- r_state = ethr_atomic_read(&lock->r_state);
- ethr_atomic_inc( &lock->w_state);
- }
- if (option & ERTS_LCNT_LO_READ) {
- ethr_atomic_inc( &lock->r_state);
+ ASSERT(!((previous->flags ^ current->flags) & SIGNIFICANT_DIFF_MASK));
}
+#endif
- /* we cannot acquire w_lock if either w or r are taken */
- /* we cannot acquire r_lock if w_lock is taken */
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)carrier, (ethr_sint_t)NULL);
- if ((w_state > 0) || (r_state > 0)) {
- eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
- }
- eltd->timer_set++;
+ if(swapped_carrier != (ethr_sint_t)NULL) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == carrier->entry_count);
+ ethr_atomic_set(&carrier->ref_count, 0);
+#endif
+
+ lcnt_deallocate_carrier__(carrier);
} else {
- eltd->lock_in_conflict = 0;
+ lcnt_insert_list_carrier(&lcnt_current_lock_list, carrier);
}
}
-void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- erts_aint_t w_state;
- erts_lcnt_thread_data_t *eltd;
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref) {
+ ethr_sint_t previous_carrier, swapped_carrier;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ previous_carrier = ethr_atomic_read(ref);
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)NULL, previous_carrier);
- w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc(&lock->w_state);
- eltd = lcnt_get_thread_data();
+ if(previous_carrier && previous_carrier == swapped_carrier) {
+ lcnt_schedule_carrier_cleanup((void*)previous_carrier);
+ }
+}
- ASSERT(eltd);
+/* - Static lock registry -
+ *
+ * Since static locks can be trusted to never disappear, we can track them
+ * pretty cheaply and won't need to bother writing an "erts_lcnt_update_xx"
+ * variant. */
+
+static void lcnt_init_static_lock_registry(void) {
+ ethr_atomic_init(&lcnt_static_lock_registry, (ethr_sint_t)NULL);
+}
+
+static void lcnt_update_static_locks(void) {
+ lcnt_static_lock_ref_t *iterator =
+ (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
+
+ while(iterator != NULL) {
+ if(!erts_lcnt_check_enabled(iterator->flags)) {
+ erts_lcnt_uninstall(iterator->reference);
+ } else if(!erts_lcnt_check_ref_installed(iterator->reference)) {
+ erts_lcnt_lock_info_carrier_t *carrier = erts_lcnt_create_lock_info_carrier(1);
+
+ erts_lcnt_init_lock_info_idx(carrier, 0, iterator->name, iterator->id, iterator->flags);
- if (w_state > 0) {
- eltd->lock_in_conflict = 1;
- /* only set the timer if nobody else has it
- * This should only happen when proc_locks aquires several locks
- * 'atomicly'. All other locks will block the thread if w_state > 0
- * i.e. locked.
- */
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
+ erts_lcnt_install(iterator->reference, carrier);
}
- eltd->timer_set++;
- } else {
- eltd->lock_in_conflict = 0;
+
+ iterator = iterator->next;
}
}
-/* if a lock wasn't really a lock operation, bad bad process locks */
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags) {
+ lcnt_static_lock_ref_t *lock = malloc(sizeof(lcnt_static_lock_ref_t));
+ int retry_insertion;
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC);
+
+ lock->reference = reference;
+ lock->flags = flags;
+ lock->name = name;
+ lock->id = id;
+
+ do {
+ ethr_sint_t swapped_head;
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
- /* should check if this thread was "waiting" */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ lock->next = (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
- ethr_atomic_dec(&lock->w_state);
+ swapped_head = ethr_atomic_cmpxchg_acqb(
+ &lcnt_static_lock_registry,
+ (ethr_sint_t)lock,
+ (ethr_sint_t)lock->next);
+
+ retry_insertion = (swapped_head != (ethr_sint_t)lock->next);
+ } while(retry_insertion);
}
-/*
- * erts_lcnt_lock_post
- *
- * Used when we get a lock (i.e. directly after a lock operation)
- * if the timer was set then we had to wait for the lock
- * lock_post will calculate the wait time.
- */
+/* - Initialization - */
+
+void erts_lcnt_pre_thr_init() {
+ /* Ensure that the dependency hack mentioned in the header doesn't
+ * explode at runtime. */
+ ERTS_CT_ASSERT(sizeof(LcntThrPrgrLaterOp) >= sizeof(ErtsThrPrgrLaterOp));
+ ERTS_CT_ASSERT(ERTS_THR_PRGR_DHANDLE_MANAGED ==
+ (ErtsThrPrgrDelayHandle)LCNT_THR_PRGR_DHANDLE_MANAGED);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0);
+ lcnt_init_list(&lcnt_current_lock_list);
+ lcnt_init_list(&lcnt_deleted_lock_list);
+
+ lcnt_init_static_lock_registry();
}
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- erts_lcnt_thread_data_t *eltd;
- erts_lcnt_time_t timer;
- erts_lcnt_time_t time_wait;
- erts_lcnt_lock_stats_t *stats;
-#ifdef DEBUG
- erts_aint_t flowstate;
-#endif
+void erts_lcnt_post_thr_init() {
+    /* ASSUMPTION: this is safe since it runs prior to the creation of other
+     * threads (directly after ethread init). */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ ethr_tsd_key_create(&lcnt_thr_data_key__, "lcnt_data");
+
+ erts_lcnt_thread_setup();
+}
+
+void erts_lcnt_late_init() {
+ /* Set start timer and zero all statistics */
+ erts_lcnt_clear_counters();
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
#ifdef DEBUG
- if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc(&lock->flowstate);
- }
+ /* It's safe to use erts_alloc and thread progress past this point. */
+ lcnt_initialization_completed__ = 1;
#endif
+}
- eltd = lcnt_get_thread_data();
-
- ASSERT(eltd);
+void erts_lcnt_post_startup(void) {
+ /* Default to capturing everything to match the behavior of the old lock
+ * counter build. */
+ erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_MASK_CATEGORY);
+}
- /* if lock was in conflict, time it */
- stats = lcnt_get_lock_stats(lock, file, line);
- if (eltd->timer_set) {
- lcnt_time(&timer);
+void erts_lcnt_thread_setup() {
+ lcnt_thread_data_t__ *eltd = lcnt_thread_data_alloc();
- lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
- lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
- eltd->timer_set--;
- ASSERT(eltd->timer_set >= 0);
- } else {
- lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
- }
+ ASSERT(eltd);
+ ethr_tsd_set(lcnt_thr_data_key__, eltd);
}
-/* unlock */
+void erts_lcnt_thread_exit_handler() {
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
+ if (eltd) {
+ free(eltd);
+ }
}
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+/* - BIF interface - */
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info) {
#ifdef DEBUG
- {
- erts_aint_t w_state;
- erts_aint_t flowstate;
-
- /* flowstate */
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 1);
- ethr_atomic_dec(&lock->flowstate);
-
- /* write state */
- w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0);
- }
+ ASSERT(ethr_atomic_inc_read_acqb(&info->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&info->ref_count);
#endif
- ethr_atomic_dec(&lock->w_state);
}
-/* trylock */
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info) {
+ ethr_sint_t count;
+
+ /* We need to acquire the lock before decrementing ref_count to avoid
+ * racing with list iteration; there's a short window between reading the
+ * reference to info and increasing its ref_count. */
+ lcnt_lock_list_entry_with_neighbors(info);
+
+ count = ethr_atomic_dec_read(&info->ref_count);
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- /* Determine lock_state via res instead of state */
- if (res != EBUSY) {
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
+ ASSERT(count >= 0);
+
+ if(count > 0) {
+ lcnt_unlock_list_entry_with_neighbors(info);
} else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+ (info->next)->prev = info->prev;
+ (info->prev)->next = info->next;
+
+ lcnt_unlock_list_entry_with_neighbors(info);
+
+ info->dispose(info);
}
}
+erts_lock_flags_t erts_lcnt_get_category_mask() {
+ return lcnt_category_mask__;
+}
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
- /* Determine lock_state via res instead of state */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (res != EBUSY) {
-#ifdef DEBUG
- {
- erts_aint_t flowstate;
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
- }
+#ifdef ERTS_ENABLE_KERNEL_POLL
+/* erl_poll/erl_check_io only exports one of these variants at a time, and we
+ * may need to use either one depending on emulator startup flags. */
+void erts_lcnt_update_pollset_locks_nkp(int);
+void erts_lcnt_update_pollset_locks_kp(int);
+
+void erts_lcnt_update_cio_locks_nkp(int);
+void erts_lcnt_update_cio_locks_kp(int);
#endif
- ethr_atomic_inc(&lock->w_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
- } else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
+ erts_lock_flags_t changed_categories;
+
+ ASSERT(!(mask & ~ERTS_LOCK_FLAGS_MASK_CATEGORY));
+ ASSERT(lcnt_initialization_completed__);
+
+ changed_categories = (lcnt_category_mask__ ^ mask);
+ lcnt_category_mask__ = mask;
+
+ if(changed_categories) {
+ lcnt_update_static_locks();
}
-}
-/* thread operations */
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION) {
+ erts_lcnt_update_distribution_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ }
-void erts_lcnt_thread_setup(void) {
- erts_lcnt_thread_data_t *eltd;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ erts_lcnt_update_allocator_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ }
- lcnt_lock();
- /* lock for thread id global update */
- eltd = lcnt_thread_data_alloc();
- lcnt_unlock();
- ASSERT(eltd);
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-}
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_PROCESS) {
+ erts_lcnt_update_process_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
+ }
-void erts_lcnt_thread_exit_handler() {
- erts_lcnt_thread_data_t *eltd;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ if(erts_use_kernel_poll) {
+ erts_lcnt_update_pollset_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_update_pollset_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+#else
+ erts_lcnt_update_pollset_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
- eltd = ethr_tsd_get(lcnt_thr_data_key);
+ erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
- if (eltd) {
- free(eltd);
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DB) {
+ erts_lcnt_update_db_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DB);
}
}
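
As a usage sketch (the particular categories chosen here are illustrative, not mandated by the patch), a caller that only cares about process and DB locks could narrow the mask like this:

    /* Count only process and DB locks; static locks in every other
     * category are uninstalled via lcnt_update_static_locks(). */
    erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_CATEGORY_PROCESS |
                                ERTS_LOCK_FLAGS_CATEGORY_DB);
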
-/* bindings for bifs */
-
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options |= opt;
- return prev;
+void erts_lcnt_set_preserve_info(int enable) {
+ lcnt_preserve_info = enable;
}
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options &= ~opt;
- return prev;
+int erts_lcnt_get_preserve_info() {
+ return lcnt_preserve_info;
}
void erts_lcnt_clear_counters(void) {
- erts_lcnt_lock_t *lock;
- erts_lcnt_lock_list_t *list;
- erts_lcnt_lock_stats_t *stats;
- int i;
+ erts_lcnt_lock_info_t *iterator;
- lcnt_lock();
+ lcnt_time__(&lcnt_timer_start);
- list = erts_lcnt_data->current_locks;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
+ lcnt_clear_stats(iterator);
+ }
- for (lock = list->head; lock != NULL; lock = lock->next) {
- for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- stats = &lock->stats[i];
- lcnt_clear_stats(stats);
- }
- lock->n_stats = 1;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_deleted_lock_list, &iterator)) {
+ erts_lcnt_release_lock_info(iterator);
}
+}
- lock = erts_lcnt_data->deleted_locks->head;
- erts_lcnt_data->deleted_locks->head = NULL;
- erts_lcnt_data->deleted_locks->tail = NULL;
- erts_lcnt_data->deleted_locks->n = 0;
+erts_lcnt_data_t erts_lcnt_get_data(void) {
+ erts_lcnt_time_t timer_stop;
+ erts_lcnt_data_t result;
- lcnt_time(&timer_start);
+ lcnt_time__(&timer_stop);
- lcnt_unlock();
+ result.timer_start = lcnt_timer_start;
- /* free deleted locks */
- lcnt_list_free(lock);
+ result.current_locks = &lcnt_current_lock_list;
+ result.deleted_locks = &lcnt_deleted_lock_list;
+
+ lcnt_time_diff__(&result.duration, &timer_stop, &result.timer_start);
+
+ return result;
}
-erts_lcnt_data_t *erts_lcnt_get_data(void) {
- erts_lcnt_time_t timer_stop;
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator) {
+ erts_lcnt_lock_info_t *current, *next;
- lcnt_lock();
+ current = *iterator ? *iterator : &list->head;
- lcnt_time(&timer_stop);
- lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start);
+ ASSERT(current != &list->tail);
- lcnt_unlock();
+ lcnt_lock_list_entry(current);
- return erts_lcnt_data;
-}
+ next = current->next;
+
+ if(next != &list->tail) {
+ erts_lcnt_retain_lock_info(next);
+ }
+
+ lcnt_unlock_list_entry(current);
+
+ if(current != &list->head) {
+ erts_lcnt_release_lock_info(current);
+ }
+
+ *iterator = next;
-char *erts_lcnt_lock_type(Uint16 type) {
- return lcnt_lock_type(type);
+ return next != &list->tail;
}
-#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* #ifdef ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 6caffbfe86..89d95a73cf 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -18,64 +18,51 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- * Abstract:
- * Locks statistics internal representation.
- *
- * Conceptual representation,
- * - set name
- * | - id (the unique lock)
- * | | - lock type
- * | | - statistics
- * | | | - location (file and line number)
- * | | | - tries
- * | | | - collisions (including trylock busy)
- * | | | - timer (time spent in waiting for lock)
- * | | | - n_timer (collisions excluding trylock busy)
- * | | | - histogram
- * | | | | - # 0 = log2(lock wait_time ns)
- * | | | | - ...
- * | | | | - # n = log2(lock wait_time ns)
- *
- * Each instance of a lock is the unique lock, i.e. set and id in that set.
- * For each lock there is a set of statistics with where and what impact
- * the lock aqusition had.
- *
- * Runtime options
- * - suspend, used when internal lock-counting can't be applied. For instance
- * when allocating a term for the outside and halloc needs to be used.
- * Default: off.
- * - location, reserved and not used.
- * - proclock, disable proclock counting. Used when performance might be an
- * issue. Accessible from erts_debug:lock_counters({process_locks, bool()}).
- * Default: off.
- * - copysave, enable saving of destroyed locks (and thereby its statistics).
- * If memory constraints is an issue this need to be disabled.
- * Accessible from erts_debug:lock_counters({copy_save, bool()}).
- * Default: off.
+/**
+ * @description Statistics for locks.
+ * @file erl_lock_count.h
+ *
+ * @author Björn-Egil Dahlberg
+ * @author John Högberg
+ *
+ * Conceptual representation:
*
+ * - set name
+ * | - id (the unique lock)
+ * | | - lock type
+ * | | - statistics
+ * | | | - location (file and line number)
+ * | | | - attempts
+ * | | | - collisions (including trylock busy)
+ * | | | - timer (time spent in waiting for lock)
+ * | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
+ *
+ * Each instance of a lock is the unique lock, i.e. set and id in that set.
+ * For each lock there is a set of statistics with where and what impact
+ * the lock acquisition had.
*/
-#include "sys.h"
-
#ifndef ERTS_LOCK_COUNT_H__
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
#ifndef ERTS_ENABLE_LOCK_POSITION
-/* Enable in order for _x variants of mtx functions to be used. */
+/** @brief Controls whether _x variants of mtx functions are used. */
#define ERTS_ENABLE_LOCK_POSITION 1
#endif
+#include "sys.h"
#include "ethread.h"
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+#include "erl_term.h"
+#include "erl_lock_flags.h"
+
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (5)
-/* histogram */
#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
@@ -85,154 +72,857 @@
#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
#endif
-#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LCNT_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LCNT_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LCNT_LT_PROCLOCK (((Uint16) 1) << 4)
-#define ERTS_LCNT_LT_ALLOC (((Uint16) 1) << 5)
+typedef struct {
+ unsigned long s;
+ unsigned long ns;
+} erts_lcnt_time_t;
+
+typedef struct {
+    /** @brief Log2 array of nanosecond occurrences. */
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
+} erts_lcnt_hist_t;
-#define ERTS_LCNT_LO_READ (((Uint16) 1) << 6)
-#define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7)
+typedef struct {
+ /** @brief In which file the lock was taken. May be NULL. */
+ const char *file;
+ /** @brief Line number in \c file */
+ unsigned int line;
-#define ERTS_LCNT_LT_DISABLE (((Uint16) 1) << 8)
+ /* "attempts" and "collisions" need to be atomic since try_lock busy does
+ * not acquire a lock and there is no post action to rectify the
+ * situation. */
-#define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \
- | ERTS_LCNT_LO_WRITE )
+ ethr_atomic_t attempts;
+ ethr_atomic_t collisions;
-#define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \
- | ERTS_LCNT_LT_RWSPINLOCK \
- | ERTS_LCNT_LT_MUTEX \
- | ERTS_LCNT_LT_RWMUTEX \
- | ERTS_LCNT_LT_PROCLOCK )
+ erts_lcnt_time_t total_time_waited;
+ Uint64 times_waited;
-#define ERTS_LCNT_LOCK_TYPE(lock) ((lock)->flag & ERTS_LCNT_LT_ALL)
-#define ERTS_LCNT_IS_LOCK_INVALID(lock) (!((lock)->flag & ERTS_LCNT_LT_ALL))
-#define ERTS_LCNT_CLEAR_FLAG(lock) ((lock)->flag = 0)
+ erts_lcnt_hist_t wait_time_histogram;
+} erts_lcnt_lock_stats_t;
-/* runtime options */
+typedef struct lcnt_lock_info_t_ {
+ erts_lock_flags_t flags;
+ const char *name;
+ /** @brief Id if possible, must be an immediate */
+ Eterm id;
-#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0)
-#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
-#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
-#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 3)
-#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 4)
+ /* The first entry is reserved as a fallback for when location information
+ * is missing, and when the lock is used in more than (MAX_LOCK_LOCATIONS
+ * - 1) different places. */
+ erts_lcnt_lock_stats_t location_stats[ERTS_LCNT_MAX_LOCK_LOCATIONS];
+ unsigned int location_count;
-typedef struct {
- unsigned long s;
- unsigned long ns;
-} erts_lcnt_time_t;
+ /* -- Everything below is internal to this module ---------------------- */
+
+ /* Lock states; rw locks uses both states, other locks only uses w_state */
-extern erts_lcnt_time_t timer_start;
+ /** @brief Write state. 0 = not taken, otherwise n threads waiting */
+ ethr_atomic_t w_state;
+ /** @brief Read state. 0 = not taken, > 0 -> writes will wait */
+ ethr_atomic_t r_state;
+
+ struct lcnt_lock_info_t_ *prev;
+ struct lcnt_lock_info_t_ *next;
+
+ /** @brief Used in place of erts_refc_t to avoid a circular dependency. */
+ ethr_atomic_t ref_count;
+ ethr_atomic32_t lock;
+
+ /** @brief Deletion hook called once \c ref_count reaches 0; may defer
+ * deletion by modifying \c ref_count. */
+ void (*dispose)(struct lcnt_lock_info_t_ *);
+
+ struct lcnt_lock_info_carrier_ *carrier;
+} erts_lcnt_lock_info_t;
+
+typedef struct lcnt_lock_info_list_ {
+ erts_lcnt_lock_info_t head;
+ erts_lcnt_lock_info_t tail;
+} erts_lcnt_lock_info_list_t;
typedef struct {
- Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */
-} erts_lcnt_hist_t;
+ erts_lcnt_time_t timer_start; /**< Time of last clear */
+ erts_lcnt_time_t duration; /**< Time since last clear */
-typedef struct erts_lcnt_lock_stats_s {
- /* "tries" and "colls" needs to be atomic since
- * trylock busy does not acquire a lock and there
- * is no post action to rectify the situation
- */
+ erts_lcnt_lock_info_list_t *current_locks;
+ erts_lcnt_lock_info_list_t *deleted_locks;
+} erts_lcnt_data_t;
- char *file; /* which file the lock was taken */
- unsigned int line; /* line number in file */
+typedef struct lcnt_lock_info_carrier_ erts_lcnt_lock_info_carrier_t;
- ethr_atomic_t tries; /* n tries to get lock */
- ethr_atomic_t colls; /* n collisions of tries to get lock */
+typedef ethr_atomic_t erts_lcnt_ref_t;
- unsigned long timer_n; /* #times waited for lock */
- erts_lcnt_time_t timer; /* total wait time for lock */
- erts_lcnt_hist_t hist;
-} erts_lcnt_lock_stats_t;
+/* -- Globals -------------------------------------------------------------- */
+
+/** @brief Checks whether counting is enabled for any of the given
+ * categories. */
+#define erts_lcnt_check_enabled(flags) \
+    (lcnt_category_mask__ & (flags))
+
+/* -- Lock operations ------------------------------------------------------
+ *
+ * All of these will nop if there's nothing "installed" on the given reference,
+ * in order to transparently support enable/disable at runtime. */
+
+/** @brief Records that a lock is being acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock
+ * @param option Notes whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Records that a lock has been acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock_post
+ * @param file The name of the file where the lock was acquired.
+ * @param line The line at which the lock was acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line);
+
+/** @brief Records that a lock has been released. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_unlock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Rectifies the case where a lock wasn't actually a lock operation.
+ *
+ * Only used for process locks at the moment. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref);
+
+/** @brief Records the result of a trylock, placing the queried lock status in
+ * \c result. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result);
+
+/** @copydoc erts_lcnt_trylock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option);
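
A minimal sketch of how these hooks are meant to bracket a real lock operation; the wrapper type and helper below are hypothetical stand-ins for the real wrappers in erl_threads.h:

    #include "erl_lock_count.h"
    #include "ethread.h"

    /* Hypothetical wrapper; the real erts_mtx_t is defined in erl_threads.h. */
    typedef struct {
        ethr_mutex mtx;
        erts_lcnt_ref_t lcnt;
    } sketch_mtx_t;

    static void sketch_mtx_lock(sketch_mtx_t *lock, char *file, unsigned int line) {
        erts_lcnt_lock(&lock->lcnt);                    /* record the attempt */
        ethr_mutex_lock(&lock->mtx);                    /* take the real lock */
        erts_lcnt_lock_post_x(&lock->lcnt, file, line); /* record the wait, if any */
    }

    static void sketch_mtx_unlock(sketch_mtx_t *lock) {
        erts_lcnt_unlock(&lock->lcnt);
        ethr_mutex_unlock(&lock->mtx);
    }
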
+
+/* Indexed variants of the standard lock operations, for use when a single
+ * reference contains many counters (eg. process locks).
+ *
+ * erts_lcnt_open_ref must be used to safely extract the installed carrier,
+ * which must be released with erts_lcnt_close_ref on success.
+ *
+ * Refer to \c erts_lcnt_lock for example usage. */
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result);
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option);
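
A sketch of the open/close pattern required by the indexed variants, for a reference whose carrier holds several counters (as the process locks do); the helper name and the way the index is chosen are hypothetical:

    static void sketch_count_sublock(erts_lcnt_ref_t *ref, int index) {
        erts_lcnt_lock_info_carrier_t *carrier;
        int handle;

        /* Nops when nothing is installed on the reference. */
        if (erts_lcnt_open_ref(ref, &handle, &carrier)) {
            erts_lcnt_lock_idx(carrier, index);
            erts_lcnt_close_ref(handle, carrier);
        }

        /* ... acquire the actual sub-lock here, then report the result with
         * erts_lcnt_lock_post_idx() through the same open/close dance. */
    }
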
+
+/* -- Reference operations ------------------------------------------------- */
+
+/** @brief Registers a lock counter reference; this must be called prior to
+ * using any other functions in this module. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref);
+
+/** @brief As \c erts_lcnt_init_ref, but also enables lock counting right
+ * away if appropriate to reduce noise.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags);
+
+/** @brief Checks whether counting is enabled on the given reference. */
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref);
+
+/** @brief Convenience macro to re/enable counting on an already initialized
+ * reference. Don't forget to specify the lock type in \c flags! */
+#define erts_lcnt_install_new_lock_info(ref, name, id, flags) \
+    do { \
+        if(!erts_lcnt_check_ref_installed(ref)) { \
+            erts_lcnt_lock_info_carrier_t *__carrier; \
+            __carrier = erts_lcnt_create_lock_info_carrier(1); \
+            erts_lcnt_init_lock_info_idx(__carrier, 0, name, id, flags); \
+            erts_lcnt_install(ref, __carrier); \
+        } \
+    } while(0)
+
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int count);
+
+/** @brief Initializes the lock info at the given index.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with.
+ * @param flags The flags the lock itself was initialized with. Keep in mind
+ * that all locks in a carrier must share the same category/static property. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags);
+
+/** @brief Atomically installs the given lock counters. Nops (and releases the
+ * provided carrier) if something was already installed. */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier);
+
+/** @brief Atomically removes the currently installed lock counters. Nops if
+ * nothing was installed. */
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref);
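
A sketch of building and installing a carrier with more than one entry; the entry names, the count, and the use of ERTS_LOCK_TYPE_PROCLOCK are illustrative assumptions:

    static void sketch_install_counters(erts_lcnt_ref_t *ref, Eterm id) {
        erts_lcnt_lock_info_carrier_t *carrier;

        carrier = erts_lcnt_create_lock_info_carrier(2);

        /* All entries in one carrier must share category/static flags. */
        erts_lcnt_init_lock_info_idx(carrier, 0, "sketch_main", id,
                                     ERTS_LOCK_TYPE_PROCLOCK);
        erts_lcnt_init_lock_info_idx(carrier, 1, "sketch_msgq", id,
                                     ERTS_LOCK_TYPE_PROCLOCK);

        /* Nops (and frees the carrier) if counters were already installed. */
        erts_lcnt_install(ref, carrier);
    }
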
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result);
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier);
+
+/* -- Module initialization ------------------------------------------------ */
+
+void erts_lcnt_pre_thr_init(void);
+void erts_lcnt_post_thr_init(void);
+void erts_lcnt_late_init(void);
+
+/** @brief Called after everything in the system has been initialized, including
+ * the schedulers. This is mainly a backwards compatibility shim for matching
+ * the old lcnt behavior where all lock counting was enabled by default. */
+void erts_lcnt_post_startup(void);
+
+void erts_lcnt_thread_setup(void);
+void erts_lcnt_thread_exit_handler(void);
+
+/* -- BIF interface -------------------------------------------------------- */
-/* rw locks uses both states, other locks only uses w_state */
-typedef struct erts_lcnt_lock_s {
- char *name; /* lock name */
- Uint16 flag; /* lock type */
- Eterm id; /* id if possible */
+/** @brief Safely iterates through all entries in the given list.
+ *
+ * The referenced item will be valid until the next call to
+ * \c erts_lcnt_iterate_list after which point it may be destroyed; call
+ * erts_lcnt_retain_lock_info if you wish to hang on to it beyond that point.
+ *
+ * Iteration can be cancelled by calling erts_lcnt_release_lock_info on the
+ * iterator and breaking out of the loop.
+ *
+ * @param iterator The iteration variable; set the pointee to NULL to start
+ * iteration.
+ * @return 1 while the iterator is valid, 0 at the end of the list. */
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator);
+
+/** @brief Clears the counter state of all locks, and releases all locks
+ * preserved through erts_lcnt_set_preserve_info (if any). */
+void erts_lcnt_clear_counters(void);
+
+/** @brief Retrieves the global lock counter state.
+ *
+ * Note that the lists may be modified while you're mucking around with them.
+ * Always use \c erts_lcnt_iterate_list to enumerate them. */
+erts_lcnt_data_t erts_lcnt_get_data(void);
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info);
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info);
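
A sketch of the iteration pattern described above; the loop body is hypothetical and only reads the iterator within the window in which it is guaranteed to be valid:

    static unsigned sketch_count_current_locks(void) {
        erts_lcnt_data_t data = erts_lcnt_get_data();
        erts_lcnt_lock_info_t *iterator = NULL; /* NULL starts the iteration */
        unsigned count = 0;

        while (erts_lcnt_iterate_list(data.current_locks, &iterator)) {
            /* iterator is only guaranteed to live until the next call;
             * use erts_lcnt_retain_lock_info() to keep it longer. */
            count++;
        }

        return count;
    }
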
+
+/** @brief Sets whether to preserve the info of destroyed/uninstalled locks.
+ *
+ * This option makes no distinction between a lock being destroyed and lock
+ * counting simply being disabled for it, so erts_lcnt_set_category_mask must
+ * not be used while this option is active. */
+void erts_lcnt_set_preserve_info(int enable);
+
+int erts_lcnt_get_preserve_info(void);
+
+/** @brief Updates the category mask, enabling or disabling counting on the
+ * affected locks as necessary.
+ *
+ * This is not guaranteed to find all existing locks; only those that are
+ * flagged as static locks and those reachable through other means can be
+ * altered. */
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask);
+
+erts_lock_flags_t erts_lcnt_get_category_mask(void);
+
+/* -- Inline implementation ------------------------------------------------ */
+
+/* The following is a hack to get the things we need from erl_thr_progress.h,
+ * which we can't #include without dependency hell breaking loose.
+ *
+ * The size of LcntThrPrgrLaterOp and value of the constant are verified at
+ * compile-time in erts_lcnt_pre_thr_init. */
+
+int lcnt_thr_progress_unmanaged_delay__(void);
+void lcnt_thr_progress_unmanaged_continue__(int handle);
+typedef struct { Uint64 _[4]; } LcntThrPrgrLaterOp;
+#define LCNT_THR_PRGR_DHANDLE_MANAGED -1
+
+struct lcnt_lock_info_carrier_ {
+ ethr_atomic_t ref_count;
+
+ LcntThrPrgrLaterOp release_entries;
+
+ unsigned char entry_count;
+ erts_lcnt_lock_info_t entries[];
+};
+
+typedef struct {
+    erts_lcnt_time_t timer; /* start of the current wait; set on first conflict */
+ int timer_set; /* bool */
+ int lock_in_conflict; /* bool */
+} lcnt_thread_data_t__;
+
+extern const int lcnt_log2_tab64__[];
+
+extern ethr_tsd_key lcnt_thr_data_key__;
+extern erts_lock_flags_t lcnt_category_mask__;
#ifdef DEBUG
- ethr_atomic_t flowstate;
+extern int lcnt_initialization_completed__;
#endif
- /* lock states */
- ethr_atomic_t w_state; /* 0 not taken, otherwise n threads waiting */
- ethr_atomic_t r_state; /* 0 not taken, > 0 -> writes will wait */
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags);
- /* statistics */
- unsigned int n_stats;
- erts_lcnt_lock_stats_t stats[ERTS_LCNT_MAX_LOCK_LOCATIONS]; /* first entry is "undefined"*/
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
- /* chains for list handling */
- /* data is hold by lcnt_lock */
- struct erts_lcnt_lock_s *prev;
- struct erts_lcnt_lock_s *next;
-} erts_lcnt_lock_t;
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v);
-typedef struct {
- erts_lcnt_lock_t *head;
- erts_lcnt_lock_t *tail;
- unsigned long n;
-} erts_lcnt_lock_list_t;
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited);
-typedef struct {
- erts_lcnt_time_t duration; /* time since last clear */
- erts_lcnt_lock_list_t *current_locks;
- erts_lcnt_lock_list_t *deleted_locks;
-} erts_lcnt_data_t;
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited);
-typedef struct {
- int id;
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line);
- erts_lcnt_time_t timer; /* timer */
- int timer_set; /* bool */
- int lock_in_conflict; /* bool */
-} erts_lcnt_thread_data_t;
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state);
-/* globals */
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time);
-extern Uint16 erts_lcnt_rt_options;
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d);
-/* function declerations */
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0);
-void erts_lcnt_init(void);
-void erts_lcnt_late_init(void);
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
-/* thread operations */
-void erts_lcnt_thread_setup(void);
-void erts_lcnt_thread_exit_handler(void);
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time) {
+    /*
+     * erts_sys_hrtime() is the highest-resolution time source we have;
+     * it may or may not be monotonic.
+     */
+ ErtsMonotonicTime mtime = erts_sys_hrtime();
+ time->s = (unsigned long) (mtime / 1000000000LL);
+ time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
+}
+
+/* difference d must be non-negative */
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
+ t->s += d->s;
+ t->ns += d->ns;
+
+ t->s += t->ns / 1000000000LL;
+ t->ns = t->ns % 1000000000LL;
+}
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
+ long ds;
+ long dns;
+
+ ds = t1->s - t0->s;
+ dns = t1->ns - t0->ns;
+
+    /* The ns component of the difference can never exceed one second. */
+
+ if (dns < 0) {
+ ds -= 1;
+ dns += 1000000000LL;
+ }
+
+ ASSERT(ds >= 0);
+
+ d->s = ds;
+ d->ns = dns;
+}
+
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return lcnt_log2_tab64__[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
+
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited) {
+ int idx;
+
+ if(time_waited->s > 0 || time_waited->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ unsigned long r = time_waited->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+
+ idx = r ? lcnt_log2__(r) : 0;
+ }
+
+ hist->ns[idx]++;
+}
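
A quick worked example of the bucketing above, assuming a build where ERTS_LCNT_HISTOGRAM_RSHIFT is 10 (one of the two settings defined earlier in this header): a 5 000 ns wait gives r = 5000 >> 10 = 4 and lands in slot lcnt_log2__(4) = 2, while a wait of one second or more (or with ns above ERTS_LCNT_HISTOGRAM_MAX_NS) is clamped into the last slot.
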
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited) {
+ ethr_atomic_inc(&stats->attempts);
+
+ if(lock_in_conflict) {
+ ethr_atomic_inc(&stats->collisions);
+ }
+
+ if(time_waited) {
+ stats->times_waited++;
+
+ lcnt_time_add__(&stats->total_time_waited, time_waited);
+ lcnt_update_wait_histogram__(&stats->wait_time_histogram, time_waited);
+ }
+}
+
+/* If we were installed while the lock was held, r/w_state will be 0 and we
+ * can't tell which unlock or unacquire operation was the last. To get around
+ * this we assume that all excess operations go *towards* zero rather than down
+ * to zero, eventually becoming consistent with the actual state once the lock
+ * is fully released.
+ *
+ * Conflicts might not be counted until the recorded state is fully consistent
+ * with the actual state, but there should be no other ill effects. */
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {
+ ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state);
+
+    /* We cannot assume that state is >= -1 here; unlock and unacquire might
+     * bring it below -1 and race to increment it back. */
+
+ if(state < 0) {
+ ethr_atomic_inc_acqb(l_state);
+ }
+}
+
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line) {
+ unsigned int i;
+
+ ASSERT(info->location_count >= 1 && info->location_count <= ERTS_LCNT_MAX_LOCK_LOCATIONS);
+
+ for(i = 0; i < info->location_count; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
+
+ if(stats->file == file && stats->line == line) {
+ return stats;
+ }
+ }
-/* list operations (local) */
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
+ if(info->location_count < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[info->location_count];
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
+ stats->file = file;
+ stats->line = line;
-/* lock operations (global) */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag);
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id);
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock);
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock);
+ info->location_count++;
-void erts_lcnt_lock(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line);
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock);
+ return stats;
+ }
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock);
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
+ return &info->location_stats[0];
+}
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void) {
+ lcnt_thread_data_t__ *eltd = (lcnt_thread_data_t__ *)ethr_tsd_get(lcnt_thr_data_key__);
-/* bif interface */
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
-void erts_lcnt_clear_counters(void);
-char *erts_lcnt_lock_type(Uint16 type);
-erts_lcnt_data_t *erts_lcnt_get_data(void);
+ ASSERT(eltd);
+
+ return eltd;
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result) {
+ if(ERTS_LIKELY(!erts_lcnt_check_ref_installed(ref))) {
+ return 0;
+ }
+
+ ASSERT(lcnt_initialization_completed__);
+
+ (*handle) = lcnt_thr_progress_unmanaged_delay__();
+ (*result) = (erts_lcnt_lock_info_carrier_t*)ethr_atomic_read(ref);
+
+ if(*result) {
+ if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_retain_carrier__(*result);
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 1;
+ } else if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 0;
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier) {
+ if(handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_release_carrier__(carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref) {
+ ethr_atomic_init(ref, (ethr_sint_t)NULL);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_init_ref(ref);
+
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC) {
+ lcnt_register_static_lock__(ref, name, id, flags);
+ }
+
+ if(erts_lcnt_check_enabled(flags)) {
+ erts_lcnt_install_new_lock_info(ref, name, id, flags);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref) {
+ return (!!*ethr_atomic_addr(ref));
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_x_idx(carrier, 0, file, line);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_unacquire_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_idx(carrier, 0, result);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_opt_idx(carrier, 0, result, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_sint_t w_state, r_state;
+
+ w_state = ethr_atomic_inc_read(&info->w_state) - 1;
+ r_state = ethr_atomic_read(&info->r_state);
+
+ /* We cannot acquire w_lock if either w or r are taken */
+ eltd->lock_in_conflict = (w_state > 0) || (r_state > 0);
+ } else {
+ ethr_sint_t w_state = ethr_atomic_read(&info->w_state);
+
+ /* We cannot acquire r_lock if w_lock is taken */
+ eltd->lock_in_conflict = (w_state > 0);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ if(eltd->lock_in_conflict) {
+ /* Only set the timer if nobody else has it. This should only happen
+ * when proc_locks acquires several locks "atomically." All other locks
+ * will block the thread when locked (w_state > 0) */
+ if(eltd->timer_set == 0) {
+ lcnt_time__(&eltd->timer);
+ }
+
+ eltd->timer_set++;
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_post_x_idx(carrier, index, NULL, 0);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+ erts_lcnt_lock_stats_t *stats;
+
+ ASSERT(index < carrier->entry_count);
+
+ /* If the lock was in conflict, update the time spent waiting. */
+ stats = lcnt_get_lock_stats__(info, file, line);
+ if(eltd->timer_set) {
+ erts_lcnt_time_t time_wait;
+ erts_lcnt_time_t timer;
+
+ lcnt_time__(&timer);
+
+ lcnt_time_diff__(&time_wait, &timer, &eltd->timer);
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, &time_wait);
+
+ eltd->timer_set--;
+
+ ASSERT(eltd->timer_set >= 0);
+ } else {
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, NULL);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_unlock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ lcnt_dec_lock_state__(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ lcnt_dec_lock_state__(&info->r_state);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ lcnt_dec_lock_state__(&info->w_state);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_trylock_opt_idx(carrier, index, result, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(result != EBUSY) {
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_atomic_inc(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ lcnt_update_stats__(&info->location_stats[0], 0, NULL);
+ } else {
+ ethr_atomic_inc(&info->location_stats[0].attempts);
+ ethr_atomic_inc(&info->location_stats[0].collisions);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(is_immed(id));
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_TYPE);
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_CATEGORY);
+
+ info->flags = flags;
+ info->name = name;
+ info->id = id;
+}
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_inc_read_acqb(&carrier->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&carrier->ref_count);
+#endif
+}
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t count = ethr_atomic_dec_read_relb(&carrier->ref_count);
+
+ ASSERT(count >= 0);
+
+ if(count == 0) {
+ lcnt_deallocate_carrier__(carrier);
+ }
+}
+
+#endif
#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
#endif /* ifndef ERTS_LOCK_COUNT_H__ */
diff --git a/erts/emulator/beam/erl_lock_flags.c b/erts/emulator/beam/erl_lock_flags.c
new file mode 100644
index 0000000000..e0a0e95c09
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.c
@@ -0,0 +1,59 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_lock_flags.h"
+
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags) {
+ switch(flags & ERTS_LOCK_FLAGS_MASK_TYPE) {
+ case ERTS_LOCK_FLAGS_TYPE_PROCLOCK:
+ return "proclock";
+ case ERTS_LOCK_FLAGS_TYPE_MUTEX:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_mutex";
+ }
+
+ return "mutex";
+ case ERTS_LOCK_FLAGS_TYPE_SPINLOCK:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_spinlock";
+ }
+
+ return "spinlock";
+ default:
+ return "garbage";
+ }
+}
+
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options) {
+ switch(options) {
+ case ERTS_LOCK_OPTIONS_RDWR:
+ return "rw";
+ case ERTS_LOCK_OPTIONS_READ:
+ return "r";
+ case ERTS_LOCK_OPTIONS_WRITE:
+ return "w";
+ default:
+ return "none";
+ }
+}
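
A small sketch of how these helpers might be used when formatting lock information; the printf call and the helper name are illustrative only:

    #include <stdio.h>
    #include "erl_lock_flags.h"

    static void sketch_describe_lock(erts_lock_flags_t flags,
                                     erts_lock_options_t options) {
        /* E.g. ERTS_LOCK_TYPE_RWMUTEX yields "rw_mutex" and
         * ERTS_LOCK_OPTIONS_RDWR yields "rw". */
        printf("%s (%s)\n",
               erts_lock_flags_get_type_name(flags),
               erts_lock_options_get_short_desc(options));
    }
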
diff --git a/erts/emulator/beam/erl_lock_flags.h b/erts/emulator/beam/erl_lock_flags.h
new file mode 100644
index 0000000000..d711f69456
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.h
@@ -0,0 +1,78 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERTS_LOCK_FLAGS_H__
+#define ERTS_LOCK_FLAGS_H__
+
+#define ERTS_LOCK_OPTIONS_READ (1 << 1)
+#define ERTS_LOCK_OPTIONS_WRITE (1 << 2)
+
+#define ERTS_LOCK_OPTIONS_RDWR (ERTS_LOCK_OPTIONS_READ | ERTS_LOCK_OPTIONS_WRITE)
+
+/* Property/category are bitfields to simplify their use in masks. */
+#define ERTS_LOCK_FLAGS_MASK_CATEGORY (0xFFC0)
+#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0030)
+
+/* Type is a plain number. */
+#define ERTS_LOCK_FLAGS_MASK_TYPE (0x000F)
+
+#define ERTS_LOCK_FLAGS_TYPE_SPINLOCK (1)
+#define ERTS_LOCK_FLAGS_TYPE_MUTEX (2)
+#define ERTS_LOCK_FLAGS_TYPE_PROCLOCK (3)
+
+/* "Static" guarantees that the lock will never be destroyed once created. */
+#define ERTS_LOCK_FLAGS_PROPERTY_STATIC (1 << 4)
+#define ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE (1 << 5)
+
+#define ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR (1 << 6)
+#define ERTS_LOCK_FLAGS_CATEGORY_PROCESS (1 << 7)
+#define ERTS_LOCK_FLAGS_CATEGORY_IO (1 << 8)
+#define ERTS_LOCK_FLAGS_CATEGORY_DB (1 << 9)
+#define ERTS_LOCK_FLAGS_CATEGORY_DEBUG (1 << 10)
+#define ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER (1 << 11)
+#define ERTS_LOCK_FLAGS_CATEGORY_GENERIC (1 << 12)
+#define ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION (1 << 13)
+
+#define ERTS_LOCK_TYPE_SPINLOCK \
+ (ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
+#define ERTS_LOCK_TYPE_RWSPINLOCK \
+ (ERTS_LOCK_TYPE_SPINLOCK | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_MUTEX \
+ (ERTS_LOCK_FLAGS_TYPE_MUTEX)
+#define ERTS_LOCK_TYPE_RWMUTEX \
+ (ERTS_LOCK_TYPE_MUTEX | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_PROCLOCK \
+ (ERTS_LOCK_FLAGS_CATEGORY_PROCESS | \
+ ERTS_LOCK_FLAGS_TYPE_PROCLOCK)
+
+/* -- -- */
+
+typedef unsigned short erts_lock_flags_t;
+typedef unsigned short erts_lock_options_t;
+
+/** @brief Gets the type name of the lock, honoring the RW flag if supplied. */
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags);
+
+/** @brief Gets a short-form description (rw/r/w/none) of the given lock options. */
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options);
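
For illustration, a composite flag decomposes under the masks above as follows; the scheduler category is an arbitrary choice and the function is purely a sketch:

    static void sketch_flag_decomposition(void) {
        erts_lock_flags_t flags = ERTS_LOCK_TYPE_RWSPINLOCK |
                                  ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER;

        erts_lock_flags_t type     = flags & ERTS_LOCK_FLAGS_MASK_TYPE;     /* 0x0001: spinlock */
        erts_lock_flags_t property = flags & ERTS_LOCK_FLAGS_MASK_PROPERTY; /* 0x0020: read/write */
        erts_lock_flags_t category = flags & ERTS_LOCK_FLAGS_MASK_CATEGORY; /* 0x0800: scheduler */

        (void)type; (void)property; (void)category;
    }
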
+
+#endif /* ERTS_LOCK_FLAGS_H__ */
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 2d70f0d874..6c477be615 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -76,7 +76,8 @@ void erts_msacc_early_init(void) {
#ifndef ERTS_MSACC_ALWAYS_ON
erts_msacc_enabled = 0;
#endif
- erts_rwmtx_init(&msacc_mutex,"msacc_list_mutex");
+ erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef USE_THREADS
erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
#else
@@ -109,7 +110,8 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
#ifdef USE_THREADS
erts_rwmtx_rwlock(&msacc_mutex);
if (!managed) {
- erts_mtx_init(&msacc->mtx,"msacc_unmanaged_mutex");
+ erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
msacc->next = msacc_unmanaged;
msacc_unmanaged = msacc;
msacc_unmanaged_count++;
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 19bb7d5b31..f2a660f085 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -583,8 +583,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
byte ip_addr[4];
Uint16 port;
- erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
+ erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_mtx_init(&mtrace_op_mutex, "mtrace_op", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 4815e5e7bb..a3016ca912 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -56,6 +56,7 @@
#include "erl_process.h"
#include "erl_bif_unique.h"
#include "erl_utils.h"
+#include "erl_io_queue.h"
#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
#include "erl_nfunc_sched.h"
@@ -66,6 +67,13 @@
#include <limits.h>
#include <stddef.h> /* offsetof */
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
/* Information about a loaded nif library.
* Each successful call to erlang:load_nif will allocate an instance of
@@ -588,6 +596,10 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
ErlTraceMessageQueue *msgq, **last_msgq;
int reds = 0;
+ /* Only one thread at a time is allowed to flush trace messages,
+ so we require the main lock to be held when doing the flush */
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = c_p->trace_msg_q;
@@ -889,26 +901,27 @@ static Eterm call_whereis(ErlNifEnv *env, Eterm name)
Process *c_p;
Eterm res;
int scheduler;
- int unlock;
execution_state(env, &c_p, &scheduler);
ASSERT((c_p && scheduler) || (!c_p && !scheduler));
- unlock = 0;
if (scheduler < 0) {
/* dirty scheduler */
if (ERTS_PROC_IS_EXITING(c_p))
return 0;
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
- unlock = 1;
- }
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ c_p = NULL; /* as we don't have main lock */
}
- res = erts_whereis_name_to_id(c_p, name);
- if (unlock)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (c_p) {
+ /* main lock may be released below and c_p->htop updated by others */
+ flush_env(env);
+ }
+ res = erts_whereis_name_to_id(c_p, name);
+ if (c_p)
+ cache_env(env);
return res;
}
@@ -2456,7 +2469,8 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
erts_refc_inc(&resource->type->refc, 2);
if (type->down) {
resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
- erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors");
+ erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
resource->monitors->root = NULL;
resource->monitors->pending_failed_fire = 0;
resource->monitors->is_dying = 0;
@@ -3342,6 +3356,363 @@ int enif_compare_monitors(const ErlNifMonitor *monitor1,
ERTS_REF_THING_SIZE*sizeof(Eterm));
}
+ErlNifIOQueue *enif_ioq_create(ErlNifIOQueueOpts opts)
+{
+ ErlNifIOQueue *q;
+
+ if (opts != ERL_NIF_IOQ_NORMAL)
+ return NULL;
+
+ q = enif_alloc(sizeof(ErlNifIOQueue));
+ if (!q) return NULL;
+ erts_ioq_init(q, ERTS_ALC_T_NIF, 0);
+
+ return q;
+}
+
+void enif_ioq_destroy(ErlNifIOQueue *q)
+{
+ erts_ioq_clear(q);
+ enif_free(q);
+}
+
+/* If the iovec was preallocated (stack or otherwise), it needs to be marked as
+ * such to perform a proper free. */
+#define ERL_NIF_IOVEC_FLAGS_PREALLOC (1 << 0)
+
+void enif_free_iovec(ErlNifIOVec *iov)
+{
+ int i;
+ /* Decrement the refc of all the binaries */
+ for (i = 0; i < iov->iovcnt; i++) {
+ Binary *bptr = ((Binary**)iov->ref_bins)[i];
+ /* bptr can be null if enq_binary was used */
+ if (bptr && erts_refc_dectest(&bptr->intern.refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ }
+
+ if (!(iov->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ enif_free(iov);
+ }
+}
+
+typedef struct {
+ UWord sublist_length;
+ Eterm sublist_start;
+ Eterm sublist_end;
+
+ UWord offheap_size;
+ UWord onheap_size;
+
+ UWord iovec_len;
+} iovec_slice_t;
+
+static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *result) {
+ Eterm lookahead;
+
+ result->sublist_start = list;
+ result->sublist_length = 0;
+ result->offheap_size = 0;
+ result->onheap_size = 0;
+ result->iovec_len = 0;
+
+ lookahead = result->sublist_start;
+
+ while (is_list(lookahead)) {
+ Eterm *binary_header, binary;
+ Eterm *cell;
+ UWord size;
+
+ cell = list_val(lookahead);
+ binary = CAR(cell);
+
+ if (!is_binary(binary)) {
+ return 0;
+ }
+
+ size = binary_size(binary);
+ binary_header = binary_val(binary);
+
+ /* If we're a sub-binary we'll need to check our underlying binary to
+ * determine whether we're on-heap or not. */
+ if(thing_subtag(*binary_header) == SUB_BINARY_SUBTAG) {
+ ErlSubBin *sb = (ErlSubBin*)binary_header;
+
+ /* Reject bitstrings */
+ if((sb->bitoffs + sb->bitsize) > 0) {
+ return 0;
+ }
+
+ ASSERT(size <= binary_size(sb->orig));
+ binary_header = binary_val(sb->orig);
+ }
+
+ if(thing_subtag(*binary_header) == HEAP_BINARY_SUBTAG) {
+ ASSERT(size <= ERL_ONHEAP_BIN_LIMIT);
+
+ result->iovec_len += 1;
+ result->onheap_size += size;
+ } else {
+ ASSERT(thing_subtag(*binary_header) == REFC_BINARY_SUBTAG);
+
+ result->iovec_len += 1 + size / MAX_SYSIOVEC_IOVLEN;
+ result->offheap_size += size;
+ }
+
+ result->sublist_length += 1;
+ lookahead = CDR(cell);
+
+ if(result->sublist_length >= max_length) {
+ break;
+ }
+ }
+
+ if (!is_nil(lookahead) && !is_list(lookahead)) {
+ return 0;
+ }
+
+ result->sublist_end = lookahead;
+
+ return 1;
+}
+
+static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) {
+ Eterm *parent_header;
+ Eterm parent_binary;
+
+ int bit_offset, bit_size;
+ Uint byte_offset;
+
+ ASSERT(is_binary(binary));
+
+ ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size);
+
+ parent_header = binary_val(parent_binary);
+
+ result->size = binary_size(binary);
+ result->bin_term = binary;
+
+ if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin*)parent_header;
+
+ ASSERT(pb->val != NULL);
+ ASSERT(byte_offset < pb->size);
+ ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes);
+
+ result->data = (unsigned char*)&pb->bytes[byte_offset];
+ result->ref_bin = (void*)pb->val;
+ } else {
+ ErlHeapBin *hb = (ErlHeapBin*)parent_header;
+
+ ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
+
+ result->data = &((unsigned char*)&hb->data)[byte_offset];
+ result->ref_bin = NULL;
+ }
+}
+
+static int fill_iovec_with_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec *iovec) {
+ UWord onheap_offset, iovec_idx;
+ ErlNifBinary onheap_data;
+ Eterm sublist_iterator;
+
+ /* Set up a common refc binary for all on-heap binaries. */
+ if (slice->onheap_size > 0) {
+ if (!enif_alloc_binary(slice->onheap_size, &onheap_data)) {
+ return 0;
+ }
+ }
+
+ sublist_iterator = slice->sublist_start;
+ onheap_offset = 0;
+ iovec_idx = 0;
+
+ while (sublist_iterator != slice->sublist_end) {
+ ErlNifBinary raw_data;
+ Eterm *cell;
+
+ cell = list_val(sublist_iterator);
+ inspect_raw_binary_data(CAR(cell), &raw_data);
+
+ /* If this isn't a refc binary, copy its contents to the onheap buffer
+ * and reference that instead. */
+ if (raw_data.ref_bin == NULL) {
+ ASSERT(onheap_offset < onheap_data.size);
+ ASSERT(slice->onheap_size > 0);
+
+ sys_memcpy(&onheap_data.data[onheap_offset],
+ raw_data.data, raw_data.size);
+
+ raw_data.data = &onheap_data.data[onheap_offset];
+ raw_data.ref_bin = onheap_data.ref_bin;
+ }
+
+ ASSERT(raw_data.ref_bin != NULL);
+
+ while (raw_data.size > 0) {
+ UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN);
+
+ ASSERT(iovec_idx < iovec->iovcnt);
+
+ iovec->iov[iovec_idx].iov_base = raw_data.data;
+ iovec->iov[iovec_idx].iov_len = chunk_len;
+
+ iovec->ref_bins[iovec_idx] = raw_data.ref_bin;
+
+ raw_data.data += chunk_len;
+ raw_data.size -= chunk_len;
+
+ iovec_idx += 1;
+ }
+
+ sublist_iterator = CDR(cell);
+ }
+
+ ASSERT(iovec_idx == iovec->iovcnt);
+
+ if (env == NULL) {
+ int i;
+ for (i = 0; i < iovec->iovcnt; i++) {
+ Binary *refc_binary = (Binary*)(iovec->ref_bins[i]);
+ erts_refc_inc(&refc_binary->intern.refc, 1);
+ }
+
+ if (slice->onheap_size > 0) {
+ /* Transfer ownership to the iovec; we've taken references to it in
+ * the above loop. */
+ enif_release_binary(&onheap_data);
+ }
+ } else {
+ if (slice->onheap_size > 0) {
+ /* Attach the binary to our environment and let the GC take care of
+ * it after returning. */
+ enif_make_binary(env, &onheap_data);
+ }
+ }
+
+ return 1;
+}
+
+static int create_iovec_from_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec **result) {
+ ErlNifIOVec *iovec = *result;
+
+ if (iovec && slice->iovec_len < ERL_NIF_IOVEC_SIZE) {
+ iovec->iov = iovec->small_iov;
+ iovec->ref_bins = iovec->small_ref_bin;
+ iovec->flags = ERL_NIF_IOVEC_FLAGS_PREALLOC;
+ } else {
+ UWord iov_offset, binv_offset, alloc_size;
+ char *alloc_base;
+
+ iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlNifIOVec));
+ binv_offset = iov_offset;
+ binv_offset += ERTS_ALC_DATA_ALIGN_SIZE(slice->iovec_len * sizeof(SysIOVec));
+ alloc_size = binv_offset;
+ alloc_size += slice->iovec_len * sizeof(Binary*);
+
+ /* If we have an environment we'll attach the allocated data to it. The
+ * GC will take care of releasing it later on. */
+ if (env != NULL) {
+ ErlNifBinary gc_bin;
+
+ if (!enif_alloc_binary(alloc_size, &gc_bin)) {
+ return 0;
+ }
+
+ alloc_base = (char*)gc_bin.data;
+ enif_make_binary(env, &gc_bin);
+ } else {
+ alloc_base = enif_alloc(alloc_size);
+ }
+
+ iovec = (ErlNifIOVec*)alloc_base;
+ iovec->iov = (SysIOVec*)(alloc_base + iov_offset);
+ iovec->ref_bins = (void**)(alloc_base + binv_offset);
+ iovec->flags = 0;
+ }
+
+ iovec->size = slice->offheap_size + slice->onheap_size;
+ iovec->iovcnt = slice->iovec_len;
+
+ if(!fill_iovec_with_slice(env, slice, iovec)) {
+ if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ enif_free(iovec);
+ }
+
+ return 0;
+ }
+
+ *result = iovec;
+
+ return 1;
+}
+
+int enif_inspect_iovec(ErlNifEnv *env, size_t max_elements,
+ ERL_NIF_TERM list, ERL_NIF_TERM *tail,
+ ErlNifIOVec **iov) {
+ iovec_slice_t slice;
+
+ if(!examine_iovec_term(list, max_elements, &slice)) {
+ return 0;
+ } else if(!create_iovec_from_slice(env, &slice, iov)) {
+ return 0;
+ }
+
+ (*tail) = slice.sublist_end;
+
+ return 1;
+}
+
+/* Enqueue the iovec into the queue, skipping the first 'skip' bytes. */
+int enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip)
+{
+ if(skip <= iov->size) {
+ return !erts_ioq_enqv(q, (ErtsIOVec*)iov, skip);
+ }
+
+ return 0;
+}
+
+int enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip)
+{
+ ErlNifIOVec vec = {1, bin->size, NULL, NULL, ERL_NIF_IOVEC_FLAGS_PREALLOC };
+ Binary *ref_bin = (Binary*)bin->ref_bin;
+ int res;
+ vec.iov = vec.small_iov;
+ vec.ref_bins = vec.small_ref_bin;
+ vec.iov[0].iov_base = bin->data;
+ vec.iov[0].iov_len = bin->size;
+ ((Binary**)(vec.ref_bins))[0] = ref_bin;
+
+ res = enif_ioq_enqv(q, &vec, skip);
+ enif_release_binary(bin);
+ return res;
+}
+
+size_t enif_ioq_size(ErlNifIOQueue *q)
+{
+ return erts_ioq_size(q);
+}
+
+int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size)
+{
+ if (erts_ioq_deq(q, elems) == -1)
+ return 0;
+ if (size)
+ *size = erts_ioq_size(q);
+ return 1;
+}
+
+SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)
+{
+ return erts_ioq_peekq(q, iovlen);
+}
+
/***************************************************************************
** load_nif/2 **
***************************************************************************/
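The block above gives NIFs a driver-style I/O queue plus a way to turn an Erlang iolist of binaries into a SysIOVec array without copying off-heap data. A hedged usage sketch follows; the NIF name, the element limit, and the error handling are illustrative only, while the enif_* calls are the ones added above:

    /* Sketch of a NIF that pushes an iolist of binaries through an IO queue. */
    static ERL_NIF_TERM queue_demo(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        ErlNifIOVec vec, *iovec = &vec;   /* stack-allocated fast path */
        ERL_NIF_TERM tail;
        ErlNifIOQueue *q;
        SysIOVec *sysiov;
        size_t queued;
        int iovlen;

        /* Inspect at most 64 list elements; anything beyond is returned in 'tail'. */
        if (!enif_inspect_iovec(env, 64, argv[0], &tail, &iovec))
            return enif_make_badarg(env);
        (void)tail;

        q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
        if (!q)
            return enif_make_badarg(env);

        /* Enqueue everything; the queue takes its own binary references. */
        if (!enif_ioq_enqv(q, iovec, 0)) {
            enif_ioq_destroy(q);
            return enif_make_badarg(env);
        }

        queued = enif_ioq_size(q);
        sysiov = enif_ioq_peek(q, &iovlen);   /* borrowed view, do not free */
        (void)sysiov; (void)iovlen;

        /* Drop the queued bytes again and clean up. */
        enif_ioq_deq(q, queued, NULL);
        enif_ioq_destroy(q);

        return enif_make_uint64(env, (ErlNifUInt64)queued);
    }

Because env is non-NULL here, the inspected iovec is either stack-allocated or attached to the environment, so it needs no explicit free; the NULL-env case is sketched after the erl_nif.h declarations below.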
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index b0d5c39798..d195721054 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -50,6 +50,7 @@
** 2.9: 18.2 enif_getenv
** 2.10: Time API
** 2.11: 19.0 enif_snprintf
+** 2.12: 20.0 add enif_ioq (NIF I/O queue API)
*/
#define ERL_NIF_MAJOR_VERSION 2
#define ERL_NIF_MINOR_VERSION 12
@@ -241,6 +242,28 @@ typedef enum {
ERL_NIF_PHASH2 = 2
} ErlNifHash;
+#define ERL_NIF_IOVEC_SIZE 16
+
+typedef struct erl_nif_io_vec {
+ int iovcnt; /* length of vectors */
+ size_t size; /* total size in bytes */
+ SysIOVec *iov;
+
+ /* internals (avert your eyes) */
+ void **ref_bins; /* Binary[] */
+ int flags;
+
+ /* Used when stack allocating the io vec */
+ SysIOVec small_iov[ERL_NIF_IOVEC_SIZE];
+ void *small_ref_bin[ERL_NIF_IOVEC_SIZE];
+} ErlNifIOVec;
+
+typedef struct erts_io_queue ErlNifIOQueue;
+
+typedef enum {
+ ERL_NIF_IOQ_NORMAL = 1
+} ErlNifIOQueueOpts;
+
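ErlNifIOVec is designed to be used directly on the stack through small_iov/small_ref_bin, as enif_ioq_enq_binary() does above, or to be heap-allocated by enif_inspect_iovec() when the initial pointer is NULL or the list is too long for the preallocated slots. When the env argument is NULL, the result additionally holds its own binary references and must be released with enif_free_iovec(). A hedged sketch of that detached form; the function name, return convention, and the writev hand-off are illustrative:

    /* Sketch: building a detached iovec on a thread without an ErlNifEnv.
     * 'list' must still be a valid term (its owning environment kept alive
     * elsewhere by the caller). */
    static int inspect_detached(ERL_NIF_TERM list, ERL_NIF_TERM *tail)
    {
        ErlNifIOVec *iovec = NULL;          /* NULL: let the call allocate */

        if (!enif_inspect_iovec(NULL, ~(size_t)0, list, tail, &iovec))
            return 0;                       /* not an iolist of binaries */

        /* ... hand iovec->iov / iovec->iovcnt to a writev-style backend ... */

        enif_free_iovec(iovec);             /* drops the references taken above */
        return 1;
    }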
/*
* Return values from enif_thread_type(). Negative values
* reserved for specific types of non-scheduler threads.
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 94c04cd126..9e573307d8 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -184,6 +184,21 @@ ERL_NIF_API_FUNC_DECL(ErlNifUInt64,enif_hash,(ErlNifHash type, ERL_NIF_TERM term
ERL_NIF_API_FUNC_DECL(int, enif_whereis_pid, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid));
ERL_NIF_API_FUNC_DECL(int, enif_whereis_port, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port));
+ERL_NIF_API_FUNC_DECL(ErlNifIOQueue *,enif_ioq_create,(ErlNifIOQueueOpts opts));
+ERL_NIF_API_FUNC_DECL(void,enif_ioq_destroy,(ErlNifIOQueue *q));
+
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_enq_binary,(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip));
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_enqv,(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip));
+
+ERL_NIF_API_FUNC_DECL(size_t,enif_ioq_size,(ErlNifIOQueue *q));
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_deq,(ErlNifIOQueue *q, size_t count, size_t *size));
+
+ERL_NIF_API_FUNC_DECL(SysIOVec*,enif_ioq_peek,(ErlNifIOQueue *q, int *iovlen));
+
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_iovec,(ErlNifEnv *env, size_t max_length, ERL_NIF_TERM iovec_term, ERL_NIF_TERM *tail, ErlNifIOVec **iovec));
+ERL_NIF_API_FUNC_DECL(void,enif_free_iovec,(ErlNifIOVec *iov));
+
+
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
*/
@@ -348,6 +363,16 @@ ERL_NIF_API_FUNC_DECL(int, enif_whereis_port, (ErlNifEnv *env, ERL_NIF_TERM name
# define enif_hash ERL_NIF_API_FUNC_MACRO(enif_hash)
# define enif_whereis_pid ERL_NIF_API_FUNC_MACRO(enif_whereis_pid)
# define enif_whereis_port ERL_NIF_API_FUNC_MACRO(enif_whereis_port)
+# define enif_ioq_create ERL_NIF_API_FUNC_MACRO(enif_ioq_create)
+# define enif_ioq_destroy ERL_NIF_API_FUNC_MACRO(enif_ioq_destroy)
+# define enif_ioq_enq ERL_NIF_API_FUNC_MACRO(enif_ioq_enq)
+# define enif_ioq_enq_binary ERL_NIF_API_FUNC_MACRO(enif_ioq_enq_binary)
+# define enif_ioq_enqv ERL_NIF_API_FUNC_MACRO(enif_ioq_enqv)
+# define enif_ioq_size ERL_NIF_API_FUNC_MACRO(enif_ioq_size)
+# define enif_ioq_deq ERL_NIF_API_FUNC_MACRO(enif_ioq_deq)
+# define enif_ioq_peek ERL_NIF_API_FUNC_MACRO(enif_ioq_peek)
+# define enif_inspect_iovec ERL_NIF_API_FUNC_MACRO(enif_inspect_iovec)
+# define enif_free_iovec ERL_NIF_API_FUNC_MACRO(enif_free_iovec)
/*
** ADD NEW ENTRIES HERE (before this comment)
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 3c5945d48d..deadf435e9 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -85,21 +85,20 @@ dist_table_cmp(void *dep1, void *dep2)
static void*
dist_table_alloc(void *dep_tmpl)
{
- Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
sysname = ((DistEntry *) dep_tmpl)->sysname;
- chnl_nr = make_small((Uint) atom_val(sysname));
dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
dist_entries++;
dep->prev = NULL;
erts_smp_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -107,12 +106,14 @@ dist_table_alloc(void *dep_tmpl)
dep->flags = 0;
dep->version = 0;
- erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
+ erts_smp_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->node_links = NULL;
dep->nlinks = NULL;
dep->monitors = NULL;
- erts_smp_mtx_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
+ erts_smp_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->qflgs = 0;
dep->qsize = 0;
dep->out_queue.first = NULL;
@@ -760,8 +761,10 @@ void erts_init_node_tables(int dd_sec)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
- erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
@@ -818,6 +821,33 @@ int erts_lc_is_de_rlocked(DistEntry *dep)
#endif
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) {
+ DistEntry *dep = (DistEntry*)dep_raw;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&dep->rwmtx.lcnt, "dist_entry", dep->sysname,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->lnk_mtx.lcnt, "dist_entry_links", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->qlock.lcnt, "dist_entry_out_queue", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ } else {
+ erts_lcnt_uninstall(&dep->rwmtx.lcnt);
+ erts_lcnt_uninstall(&dep->lnk_mtx.lcnt);
+ erts_lcnt_uninstall(&dep->qlock.lcnt);
+ }
+}
+
+void erts_lcnt_update_distribution_locks(int enable) {
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count,
+ (void*)(UWord)enable);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
+#endif
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The following is only supposed to be used for testing, and debugging. *
* *
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 489da1ba17..91bcb4fce1 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -195,6 +195,10 @@ int erts_lc_is_de_rwlocked(DistEntry *);
int erts_lc_is_de_rlocked(DistEntry *);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_distribution_locks(int enable);
+#endif
+
ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep);
ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep);
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 6a3213ec52..b64de624dd 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -31,6 +31,9 @@ typedef struct ErtsProc2PortSigData_ ErtsProc2PortSigData;
#include "erl_ptab.h"
#include "erl_thr_progress.h"
#include "erl_trace.h"
+#define ERTS_IO_QUEUE_TYPES_ONLY__
+#include "erl_io_queue.h"
+#undef ERTS_IO_QUEUE_TYPES_ONLY__
#ifndef __WIN32__
#define ERTS_DEFAULT_MAX_PORTS (1 << 16)
@@ -75,23 +78,8 @@ typedef struct erts_driver_t_ erts_driver_t;
#define ERTS_Port2ErlDrvPort(PH) ((ErlDrvPort) (PH))
#endif
-#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
+typedef ErtsIOQueue ErlPortIOQueue;
-typedef struct {
- ErlDrvSizeT size; /* total size in bytes */
-
- SysIOVec* v_start;
- SysIOVec* v_end;
- SysIOVec* v_head;
- SysIOVec* v_tail;
- SysIOVec v_small[SMALL_IO_QUEUE];
-
- ErlDrvBinary** b_start;
- ErlDrvBinary** b_end;
- ErlDrvBinary** b_head;
- ErlDrvBinary** b_tail;
- ErlDrvBinary* b_small[SMALL_IO_QUEUE];
-} ErlIOQueue;
typedef struct line_buf { /* Buffer used in line oriented I/O */
ErlDrvSizeT bufsiz; /* Size of character buffer */
@@ -172,7 +160,7 @@ struct _erl_drv_port {
Uint bytes_in; /* Number of bytes read */
Uint bytes_out; /* Number of bytes written */
- ErlIOQueue ioq; /* driver accessible i/o queue */
+ ErlPortIOQueue ioq; /* driver accessible i/o queue */
DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
char *name; /* String used in the open */
erts_driver_t* drv_ptr;
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index a044de3fee..1ab1e47254 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -852,10 +852,11 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
}
static ERTS_INLINE void
-abort_nosuspend_task(Port *pp,
- ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp,
- int bpq_data)
+abort_signal_task(Port *pp,
+ int abort_type,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
@@ -863,18 +864,28 @@ abort_nosuspend_task(Port *pp,
if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
else {
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data);
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
aborted_proc2port_data(pp, size);
}
}
+
+static ERTS_INLINE void
+abort_nosuspend_task(Port *pp,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
+{
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, type, tdp, bpq_data);
+}
+
static ErtsPortTaskHandleList *
get_free_nosuspend_handles(Port *pp)
{
@@ -1613,8 +1624,9 @@ abort_nosuspend:
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
+
+ ASSERT(ptp);
+ port_task_free(ptp);
return 0;
@@ -1625,12 +1637,15 @@ fail:
erts_port_dec_refc(pp);
#endif
+ if (ptp) {
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
+ ptp->type, &ptp->u.alive.td, 0);
+ port_task_free(ptp);
+ }
+
if (ns_pthlp)
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
-
return -1;
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 9cca62ffaf..39f403b443 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -188,13 +188,7 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x_opt(&ptsp->mtx, lock_str, instr_id,
- ((erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
- ? 0 : ERTS_LCNT_LT_DISABLE));
-#else
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
-#endif
+ erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
}
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index fc2b34e70f..63c838a91d 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -841,7 +841,9 @@ erts_late_init_process(void)
{
int ix;
- erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
+ erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
Eterm atom;
char *atom_str;
@@ -2010,12 +2012,13 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
int id, self = 0;
if (ignore_self) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
- if (esdp)
- self = (int) esdp->no;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* ignore_self is meaningless on dirty schedulers since aux work can
+ * only run on normal schedulers, and their ids do not translate. */
+ if(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ self = (int)esdp->no;
+ }
}
ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
@@ -6232,13 +6235,17 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
* id if the esdp->no <-> ix+1 mapping change.
*/
- erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
+ erts_smp_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_smp_cnd_init(&rq->cnd);
#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- if (ERTS_RUNQ_IX_IS_DIRTY(ix))
- erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
+ erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list",
+ make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ }
rq->sleepers.list = NULL;
#endif
#endif
@@ -6431,7 +6438,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
- erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
+ erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
@@ -7493,7 +7501,8 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
static void
init_scheduler_suspend(void)
{
- erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
+ erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
schdlr_sspnd.online.normal = 1;
schdlr_sspnd.curr_online.normal = 1;
schdlr_sspnd.active.normal = 1;
@@ -11768,9 +11777,11 @@ flush_dirty_trace_messages(void *vpid)
erts_free(ERTS_ALC_T_DIRTY_SL, vpid);
#endif
- proc = erts_proc_lookup(pid);
- if (proc)
- (void) erts_flush_trace_messages(proc, 0);
+ proc = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_MAIN, 0);
+ if (proc) {
+ (void) erts_flush_trace_messages(proc, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ }
}
#endif /* ERTS_DIRTY_SCHEDULERS */
@@ -14102,8 +14113,11 @@ erts_continue_exit_process(Process *p)
have none here */
}
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
#ifdef ERTS_SMP
- erts_flush_trace_messages(p, 0);
+ erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN);
#endif
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
@@ -14111,11 +14125,6 @@ erts_continue_exit_process(Process *p)
if (!delay_del_proc)
delete_process(p);
-#ifdef ERTS_SMP
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-#endif
-
return;
yield:
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index c0e7380ed0..ff124d5ba7 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -112,21 +112,13 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
-#ifdef ERTS_ENABLE_LOCK_COUNT
-static void lcnt_enable_proc_lock_count(Process *proc, int enable);
-#endif
-
void
erts_init_proc_lock(int cpus)
{
int i;
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i));
-#else
- erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
+ erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
}
#if ERTS_PROC_LOCK_OWN_IMPL
erts_thr_install_exit_handler(cleanup_tse);
@@ -944,7 +936,7 @@ erts_pid2proc_opt(Process *c_p,
erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
- erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
+ erts_lcnt_proc_lock_unacquire(&proc->lock, lcnt_locks);
#endif
managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
@@ -1062,32 +1054,38 @@ erts_proc_lock_init(Process *p)
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
+ erts_mtx_init(&p->lock.main, "proc_main", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
+ erts_mtx_init(&p->lock.link, "proc_link", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
+ erts_mtx_init(&p->lock.msgq, "proc_msgq", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.btm, "proc_btm", p->common.id);
+ erts_mtx_init(&p->lock.btm, "proc_btm", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.btm.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.btm.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
+ erts_mtx_init(&p->lock.status, "proc_status", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
- erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id);
+ erts_mtx_init(&p->lock.trace, "proc_trace", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.trace.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.trace.lc);
@@ -1124,117 +1122,70 @@ erts_proc_lock_fin(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_proc_lock_count(int enable) {
- int ix, max = erts_ptab_max(&erts_proc);
- Process *proc = NULL;
- for (ix = 0; ix < max; ++ix) {
- if ((proc = erts_pix2proc(ix)) != NULL)
- lcnt_enable_proc_lock_count(proc, enable);
- } /* for all processes */
-}
-
void erts_lcnt_proc_lock_init(Process *p) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) {
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_main));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_msgq));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_btm));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_status));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_trace));
- } else { /* now the common case */
- Eterm pid = (p->common.id != ERTS_INVALID_PID) ? p->common.id : NIL;
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status),"proc_status",ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_trace), "proc_trace", ERTS_LCNT_LT_PROCLOCK, pid);
- } /* the lock names should really be aligned to four characters */
+ erts_lcnt_init_ref(&p->lock.lcnt_carrier);
+
+ if(erts_lcnt_check_enabled(ERTS_LOCK_FLAGS_CATEGORY_PROCESS)) {
+ erts_lcnt_enable_proc_lock_count(p, 1);
+ }
} /* logic reversed */
void erts_lcnt_proc_lock_destroy(Process *p) {
- erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_btm));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_trace));
+ erts_lcnt_uninstall(&p->lock.lcnt_carrier);
}
-static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
- if (enable) {
- if (!ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_init(proc);
- }
- }
- else {
- if (ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_destroy(proc);
- }
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable) {
+ if(proc->common.id == ERTS_INVALID_PID) {
+ /* Locks without an id are more trouble than they're worth; there's no
+ * way to look them up and we can't track them with _STATIC since it's
+ * too early to tell whether we're a system process (proc->static_flags
+ * hasn't been set yet). */
+ } else if(!enable) {
+ erts_lcnt_proc_lock_destroy(proc);
+ } else if(!erts_lcnt_check_ref_installed(&proc->lock.lcnt_carrier)) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+
+ carrier = erts_lcnt_create_lock_info_carrier(ERTS_LCNT_PROCLOCK_COUNT);
+
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN,
+ "proc_main", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK,
+ "proc_link", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ,
+ "proc_msgq", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM,
+ "proc_btm", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS,
+ "proc_status",proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE,
+ "proc_trace", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+
+ erts_lcnt_install(&proc->lock.lcnt_carrier, carrier);
}
}
-void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock(&(lock->lcnt_trace)); }
-}
+void erts_lcnt_update_process_locks(int enable) {
+ int i, max;
-void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
- char *file, unsigned int line) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
- }
- if (locks & ERTS_PROC_LOCK_BTM) {
- erts_lcnt_lock_post_x(&(lock->lcnt_btm), file, line);
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
- }
- if (locks & ERTS_PROC_LOCK_TRACE) {
- erts_lcnt_lock_post_x(&(lock->lcnt_trace), file, line);
- }
-}
+ max = erts_ptab_max(&erts_proc);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_unaquire(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_unaquire(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock_unaquire(&(lock->lcnt_trace)); }
-}
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Process *proc;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ proc = erts_pix2proc(i);
+
+ if(proc != NULL) {
+ erts_lcnt_enable_proc_lock_count(proc, enable);
+ }
-void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_unlock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_unlock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_unlock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_unlock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_unlock(&(lock->lcnt_trace)); }
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
+ }
}
-void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_trylock(&(lock->lcnt_main), res); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_trylock(&(lock->lcnt_msgq), res); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_trylock(&(lock->lcnt_btm), res); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_trylock(&(lock->lcnt_status), res); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_trylock(&(lock->lcnt_trace), res); }
-} /* reversed logic */
+
#endif /* ERTS_ENABLE_LOCK_COUNT */
@@ -1249,7 +1200,7 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_lock_x(&lck,file,line);
@@ -1282,7 +1233,7 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_trylock_x(locked, &lck, file, line);
@@ -1314,7 +1265,7 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unlock(&lck);
@@ -1349,7 +1300,7 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_might_unlock(&lck);
@@ -1397,7 +1348,7 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_require_lock(&lck, file, line);
@@ -1444,7 +1395,7 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unrequire_lock(&lck);
@@ -1493,7 +1444,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
lck.id = lc_id.proc_lock_main;
@@ -1524,7 +1475,7 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
+ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LOCK_TYPE_PROCLOCK)
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
@@ -1739,22 +1690,22 @@ erts_proc_lc_my_proc_locks(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t locks[6] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_btm,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK)};
+ ERTS_LOCK_TYPE_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t locks[6] = {p->lock.main.lc,
p->lock.link.lc,
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 6e704b185d..023ba4d4ae 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -78,13 +78,19 @@ typedef struct erts_proc_lock_t_ {
ErtsProcLocks flags;
#endif
erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt_main;
- erts_lcnt_lock_t lcnt_link;
- erts_lcnt_lock_t lcnt_msgq;
- erts_lcnt_lock_t lcnt_btm;
- erts_lcnt_lock_t lcnt_status;
- erts_lcnt_lock_t lcnt_trace;
+#if defined(ERTS_ENABLE_LOCK_COUNT) && !ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ /* Each erts_mtx_t has its own lock counter ^ */
+
+ #define ERTS_LCNT_PROCLOCK_IDX_MAIN 0
+ #define ERTS_LCNT_PROCLOCK_IDX_LINK 1
+ #define ERTS_LCNT_PROCLOCK_IDX_MSGQ 2
+ #define ERTS_LCNT_PROCLOCK_IDX_BTM 3
+ #define ERTS_LCNT_PROCLOCK_IDX_STATUS 4
+ #define ERTS_LCNT_PROCLOCK_IDX_TRACE 5
+
+ #define ERTS_LCNT_PROCLOCK_COUNT 6
+
+ erts_lcnt_ref_t lcnt_carrier;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
@@ -245,14 +251,170 @@ typedef struct erts_proc_lock_t_ {
void erts_lcnt_proc_lock_init(Process *p);
void erts_lcnt_proc_lock_destroy(Process *p);
+
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res);
-void erts_lcnt_enable_proc_lock_count(int enable);
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable);
+void erts_lcnt_update_process_locks(int enable);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
+ char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, file, line);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, res);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, res);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, res);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, res);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, res);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, res);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERTS_ENABLE_LOCK_COUNT*/
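The rewrite above replaces the six per-lock erts_lcnt_lock_t fields with a single erts_lcnt_ref_t carrier, presumably so lock counting can be installed and torn down at runtime while lock operations are in flight: every hot path opens the reference, updates the indexed entry, and closes it again. A hedged sketch of the same carrier pattern applied to a hypothetical two-lock structure; my_thing_t, the index macros, and the "my_*" names are invented:

    #define MY_LCNT_IDX_FOO 0
    #define MY_LCNT_IDX_BAR 1
    #define MY_LCNT_COUNT   2

    typedef struct {
        erts_lcnt_ref_t lcnt_carrier;   /* initialized once with erts_lcnt_init_ref() */
        /* ... the actual locks ... */
    } my_thing_t;

    static void my_thing_lcnt_enable(my_thing_t *t, Eterm id)
    {
        erts_lcnt_lock_info_carrier_t *carrier =
            erts_lcnt_create_lock_info_carrier(MY_LCNT_COUNT);

        erts_lcnt_init_lock_info_idx(carrier, MY_LCNT_IDX_FOO, "my_foo", id,
                                     ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
        erts_lcnt_init_lock_info_idx(carrier, MY_LCNT_IDX_BAR, "my_bar", id,
                                     ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);

        erts_lcnt_install(&t->lcnt_carrier, carrier);
    }

    static void my_thing_lcnt_lock_foo(my_thing_t *t)
    {
        erts_lcnt_lock_info_carrier_t *carrier;
        int handle;

        /* open_ref simply fails when counting is disabled or being torn down. */
        if (erts_lcnt_open_ref(&t->lcnt_carrier, &handle, &carrier)) {
            erts_lcnt_lock_idx(carrier, MY_LCNT_IDX_FOO);
            erts_lcnt_close_ref(handle, carrier);
        }
    }

Teardown would mirror erts_lcnt_proc_lock_destroy() above: a single erts_lcnt_uninstall(&t->lcnt_carrier), with the unlock/trylock paths using erts_lcnt_unlock_idx()/erts_lcnt_trylock_idx() in the same open/close bracket.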
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index c3d59cb3a8..b3bcb3af3f 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -372,7 +372,8 @@ erts_ptab_init_table(ErtsPTab *ptab,
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
+ erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
last_data_init_nob(ptab, ~((Uint64) 0));
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index cab4bd73db..96238318c9 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -161,7 +161,7 @@ enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
if ((i & 1) == 0)
itmp = itmp2;
else {
- enq = (erts_sspa_blk_t *) itmp;
+ enq = (erts_sspa_blk_t *) itmp2;
itmp = erts_atomic_read_acqb(&enq->next_atmc);
ASSERT(itmp != ERTS_AINT_NULL);
}
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 181736b009..696bdbdaf1 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -128,14 +128,14 @@ ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y);
#define ERTS_SMP_HAVE_REC_MTX_INIT 1
ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx);
#endif
-ERTS_GLB_INLINE void erts_smp_mtx_init_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
@@ -153,18 +153,15 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name);
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -181,11 +178,10 @@ ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -194,11 +190,10 @@ ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file,
ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -1062,34 +1057,18 @@ erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx)
#endif
ERTS_GLB_INLINE void
-erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra);
+ erts_mtx_init(mtx, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_mtx_init_locked_x_opt(mtx, name, extra, 0);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init(mtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_locked(mtx, name);
+ erts_mtx_init_locked(mtx, name, extra, flags);
#endif
}
@@ -1211,39 +1190,25 @@ erts_smp_rwmtx_set_reader_group(int no)
}
ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwmtx_init_x(rwmtx, name, extra);
+ erts_smp_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt(rwmtx, opt, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwmtx_init(rwmtx, name);
+ erts_rwmtx_init_opt(rwmtx, opt, name, extra, flags);
#endif
}
@@ -1379,20 +1344,10 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_spinlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name)
+erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_spinlock_init(lock, name);
+ erts_spinlock_init(lock, name, extra, flags);
#else
(void)lock;
#endif
@@ -1445,20 +1400,10 @@ erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name)
+erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwlock_init(lock, name);
+ erts_rwlock_init(lock, name, extra, flags);
#else
(void)lock;
#endif
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 700ed90def..2a9f276e02 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -321,13 +321,23 @@ tmp_thr_prgr_data(ErtsSchedulerData *esdp)
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
if (!tpd) {
- /*
- * We only allocate the part up to the wakeup_request field
- * which is the first field only used by registered threads
- */
- tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
- offsetof(ErtsThrPrgrData, wakeup_request));
- init_tmp_thr_prgr_data(tpd);
+ /*
+ * We only allocate the part up to the wakeup_request field which is
+ * the first field only used by registered threads
+ */
+ size_t alloc_size = offsetof(ErtsThrPrgrData, wakeup_request);
+
+ /* We may land here as a result of unmanaged_delay being called from
+ * the lock counting module, which in turn might be called from within
+ * the allocator, so we use plain malloc to avoid deadlocks. */
+ tpd =
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ malloc(alloc_size);
+#else
+ erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA, alloc_size);
+#endif
+
+ init_tmp_thr_prgr_data(tpd);
}
return tpd;
@@ -337,8 +347,13 @@ static ERTS_INLINE void
return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
if (tpd->is_temporary) {
- erts_tsd_set(erts_thr_prgr_data_key__, NULL);
- erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ free(tpd);
+#else
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+#endif
}
}
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 9612b70469..8b5c17d739 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -259,13 +259,16 @@
#include "sys.h"
+#include "erl_lock_flags.h"
+#include "erl_term.h"
+
#ifdef USE_THREADS
#define ETHR_TRY_INLINE_FUNCS
#include "ethread.h"
+
#include "erl_lock_check.h"
#include "erl_lock_count.h"
-#include "erl_term.h"
#if defined(__GLIBC__) && (__GLIBC__ << 16) + __GLIBC_MINOR__ < (2 << 16) + 4
/*
@@ -307,9 +310,11 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
-
} erts_mtx_t;
typedef ethr_cond erts_cnd_t;
@@ -320,7 +325,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwmtx_t;
@@ -365,7 +373,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_spinlock_t;
@@ -376,7 +387,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwlock_t;
@@ -479,11 +493,14 @@ ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
-ERTS_GLB_INLINE void erts_mtx_init_locked_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
-ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
@@ -502,18 +519,15 @@ ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name);
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -603,16 +617,10 @@ ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp
ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
erts_aint64_t mask,
erts_aint64_t set);
-
-ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
- char *name,
- Eterm extra,
- Uint16 opt);
-ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -621,11 +629,10 @@ ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigne
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_rwlock_init_x(erts_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -2159,97 +2166,41 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
-#endif
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "initialize mutex");
+ }
-ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
-#endif
+ flags |= ERTS_LOCK_TYPE_MUTEX;
+#ifdef DEBUG
+ mtx->flags = flags;
#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_init_locked_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
-#endif
- ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
+ erts_lc_init_lock_x(&mtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
-erts_mtx_init(erts_mtx_t *mtx, char *name)
+erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
-#endif
-}
+ erts_mtx_init(mtx, name, extra, flags);
-ERTS_GLB_INLINE void
-erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
ethr_mutex_lock(&mtx->mtx);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ #ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &mtx->lc);
+ #endif
+ #ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&mtx->lcnt, 1);
+ #endif
#endif
}
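
The former erts_mtx_init_x/_x_opt/_locked_x_opt variants collapse into the two initializers above, which take the name, the extra term, and the lock flags directly. A minimal caller sketch (name and category are illustrative):

    static erts_mtx_t example_mtx;

    static void example_mtx_setup(void)
    {
        /* "extra" identifies the lock instance (NIL when there is no natural
         * term to attach); the flags feed lock checking and lock counting. */
        erts_mtx_init(&example_mtx, "example_mtx", NIL,
                      ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
    }
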
@@ -2258,11 +2209,14 @@ erts_mtx_destroy(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&mtx->lcnt);
+ erts_lcnt_uninstall(&mtx->lcnt);
#endif
res = ethr_mutex_destroy(&mtx->mtx);
if (res != 0) {
@@ -2359,7 +2313,8 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_FLAGS_TYPE_MUTEX;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2459,7 +2414,7 @@ erts_rwmtx_set_reader_group(int no)
#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_check_no_locked_of_type(ERTS_LOCK_TYPE_RWMUTEX);
#endif
res = ethr_rwmutex_set_reader_group(no);
if (res != 0)
@@ -2468,57 +2423,32 @@ erts_rwmtx_set_reader_group(int no)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt,
+ char *name, Eterm extra, erts_lock_flags_t flags) {
#ifdef USE_THREADS
int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (name && name[0] == '\0')
- erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
- else
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
-#endif
-#endif
-}
+ if (res != 0) {
+ erts_thr_fatal_error(res, "initialize rwmutex");
+ }
-ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra)
-{
- erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
-}
+ flags |= ERTS_LOCK_TYPE_RWMUTEX;
+#ifdef DEBUG
+ rwmtx->flags = flags;
+#endif
-ERTS_GLB_INLINE void
-erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_init_lock_x(&rwmtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX);
-#endif
+ erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
-{
- erts_rwmtx_init_opt(rwmtx, NULL, name);
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra,
+ erts_lock_flags_t flags) {
+ erts_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
}
ERTS_GLB_INLINE void
@@ -2526,11 +2456,14 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&rwmtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&rwmtx->lcnt);
+ erts_lcnt_uninstall(&rwmtx->lcnt);
#endif
res = ethr_rwmutex_destroy(&rwmtx->rwmtx);
if (res != 0) {
@@ -2558,7 +2491,7 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2567,13 +2500,13 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
return res;
@@ -2592,13 +2525,13 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2612,10 +2545,10 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
@@ -2633,7 +2566,7 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2642,13 +2575,13 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
return res;
@@ -2667,13 +2600,13 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2687,10 +2620,10 @@ erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
@@ -2728,7 +2661,8 @@ erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2742,7 +2676,8 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3075,59 +3010,26 @@ erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
/* spinlock */
ERTS_GLB_INLINE void
-erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
+erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK, extra);
-#endif
-#else
- (void)lock;
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "init spinlock");
+ }
-ERTS_GLB_INLINE void
-erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
- Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+ flags |= ERTS_LOCK_TYPE_SPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_spinlock_init(erts_spinlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
@@ -3135,11 +3037,14 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_spinlock_destroy(&lock->slck);
if (res != 0) {
@@ -3207,7 +3112,8 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_TYPE_SPINLOCK;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3218,39 +3124,26 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
/* rwspinlock */
ERTS_GLB_INLINE void
-erts_rwlock_init_x(erts_rwlock_t *lock, char *name, Eterm extra)
+erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK, extra);
-#endif
-#else
- (void)lock;
+ if (res) {
+ erts_thr_fatal_error(res, "init rwlock");
+ }
+
+ flags |= ERTS_LOCK_TYPE_RWSPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-}
-ERTS_GLB_INLINE void
-erts_rwlock_init(erts_rwlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
@@ -3258,11 +3151,14 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_rwlock_destroy(&lock->rwlck);
if (res != 0) {
@@ -3286,10 +3182,10 @@ erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_unlock(&lock->rwlck);
#else
@@ -3307,13 +3203,13 @@ erts_read_lock(erts_rwlock_t *lock)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -3329,10 +3225,10 @@ erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_unlock(&lock->rwlck);
#else
@@ -3350,13 +3246,13 @@ erts_write_lock(erts_rwlock_t *lock)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -3373,7 +3269,8 @@ erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3387,7 +3284,8 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
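
One pattern worth noting in the destroy paths above: with DEBUG defined, each lock records the flags it was created with, and destroying a lock tagged as static trips an assertion. Reduced to its core (the wrapper name is illustrative):

    /* Same guard as in erts_mtx_destroy()/erts_rwmtx_destroy()/
     * erts_spinlock_destroy()/erts_rwlock_destroy() above: locks created with
     * ERTS_LOCK_FLAGS_PROPERTY_STATIC live for the whole emulator lifetime and
     * must never reach a destroy call. ASSERT is a no-op in non-DEBUG builds. */
    static void example_mtx_teardown(erts_mtx_t *mtx)
    {
        ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
        erts_mtx_destroy(mtx);
    }
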
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index ccc5526664..27164d50a0 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -130,6 +130,13 @@ Eterm erts_get_monotonic_end_time(struct process *c_p);
Eterm erts_monotonic_time_source(struct process*c_p);
Eterm erts_system_time_source(struct process*c_p);
+void erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user,
+ ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff,
+ ErtsMonotonicTime *ms_sys_diff);
+void erts_wall_clock_elapsed_both(ErtsMonotonicTime *total,
+ ErtsMonotonicTime *diff);
+
#ifdef SYS_CLOCK_RESOLUTION
#define ERTS_CLKTCK_RESOLUTION ((ErtsMonotonicTime) (SYS_CLOCK_RESOLUTION*1000))
#else
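
erts_runtime_elapsed_both() and erts_wall_clock_elapsed_both() replace the old elapsed_time_both()/wall_clock_elapsed_time_both() entry points and report ErtsMonotonicTime milliseconds instead of UWord (see the erl_time_sup.c hunks below). A hypothetical caller reading both the totals and the deltas since the previous call:

    #include <stdio.h>

    /* Hypothetical reporting helper; the *_diff arguments may be NULL when
     * only the totals are wanted, but all outputs are requested here. */
    static void report_elapsed(void)
    {
        ErtsMonotonicTime user, sys, user_diff, sys_diff;
        ErtsMonotonicTime wall_total, wall_diff;

        erts_runtime_elapsed_both(&user, &sys, &user_diff, &sys_diff);
        erts_wall_clock_elapsed_both(&wall_total, &wall_diff);

        fprintf(stderr, "runtime %lld ms (+%lld), wall clock %lld ms (+%lld)\n",
                (long long) user, (long long) user_diff,
                (long long) wall_total, (long long) wall_diff);
    }
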
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 3084a8db75..979c03fd43 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -36,12 +36,29 @@
#include "erl_driver.h"
#include "erl_nif.h"
-static erts_smp_mtx_t erts_timeofday_mtx;
static erts_smp_mtx_t erts_get_time_mtx;
-static SysTimes t_start; /* Used in elapsed_time_both */
-static ErtsMonotonicTime prev_wall_clock_elapsed; /* Used in wall_clock_elapsed_time_both */
-static ErtsMonotonicTime previous_now; /* Used in get_now */
+ /* used by erts_runtime_elapsed_both */
+typedef struct {
+ erts_smp_mtx_t mtx;
+ ErtsMonotonicTime user;
+ ErtsMonotonicTime sys;
+} ErtsRunTimePrevData;
+
+static union {
+ ErtsRunTimePrevData data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsRunTimePrevData))];
+} runtime_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static union {
+ erts_smp_atomic64_t time;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_atomic64_t))];
+} wall_clock_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static union {
+ erts_smp_atomic64_t time;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_atomic64_t))];
+} now_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
static ErtsMonitor *time_offset_monitors = NULL;
static Uint no_time_offset_monitors = 0;
@@ -954,8 +971,12 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
- erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
- erts_smp_mtx_init(&erts_get_time_mtx, "get_time");
+ erts_smp_mtx_init(&erts_get_time_mtx, "get_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_smp_mtx_init(&runtime_prev.data.mtx, "runtime", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ runtime_prev.data.user = 0;
+ runtime_prev.data.sys = 0;
time_sup.r.o.correction = time_correction;
time_sup.r.o.warp_mode = time_warp_mode;
@@ -1120,8 +1141,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx,
- &rwmtx_opts, "get_corrected_time");
+ erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts,
+ "get_corrected_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
cdatap = &time_sup.inf.c.parmon.cdata;
@@ -1154,9 +1176,13 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
time_sup.f.c.last_not_corrected_time = 0;
}
- prev_wall_clock_elapsed = 0;
+ erts_smp_atomic64_init_nob(&wall_clock_prev.time,
+ (erts_aint64_t) 0);
+
+ erts_smp_atomic64_init_nob(
+ &now_prev.time,
+ (erts_aint64_t) ERTS_MONOTONIC_TO_USEC(get_time_offset()));
- previous_now = ERTS_MONOTONIC_TO_USEC(get_time_offset());
#ifdef DEBUG
time_sup_initialized = 1;
@@ -1286,56 +1312,93 @@ erts_finalize_time_offset(void)
/* info functions */
void
-elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff)
+erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff, ErtsMonotonicTime *ms_sys_diff)
{
- UWord prev_total_user, prev_total_sys;
- UWord total_user, total_sys;
+ ErtsMonotonicTime prev_user, prev_sys, user, sys;
+
+#ifdef HAVE_GETRUSAGE
+
+ struct rusage now;
+
+ if (getrusage(RUSAGE_SELF, &now) != 0) {
+ erts_exit(ERTS_ABORT_EXIT, "getrusage(RUSAGE_SELF, _) failed: %d\n", errno);
+ return;
+ }
+
+ user = (ErtsMonotonicTime) now.ru_utime.tv_sec;
+ user *= (ErtsMonotonicTime) 1000000;
+ user += (ErtsMonotonicTime) now.ru_utime.tv_usec;
+ user /= (ErtsMonotonicTime) 1000;
+
+ sys = (ErtsMonotonicTime) now.ru_stime.tv_sec;
+ sys *= (ErtsMonotonicTime) 1000000;
+ sys += (ErtsMonotonicTime) now.ru_stime.tv_usec;
+ sys /= (ErtsMonotonicTime) 1000;
+
+#else
+
SysTimes now;
sys_times(&now);
- total_user = (now.tms_utime * 1000) / SYS_CLK_TCK;
- total_sys = (now.tms_stime * 1000) / SYS_CLK_TCK;
+ user = (ErtsMonotonicTime) now.tms_utime;
+ user *= (ErtsMonotonicTime) 1000;
+ user /= (ErtsMonotonicTime) SYS_CLK_TCK;
- if (ms_user != NULL)
- *ms_user = total_user;
- if (ms_sys != NULL)
- *ms_sys = total_sys;
+ sys = (ErtsMonotonicTime) now.tms_stime;
+ sys *= (ErtsMonotonicTime) 1000;
+ sys /= (ErtsMonotonicTime) SYS_CLK_TCK;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- prev_total_user = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
- prev_total_sys = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
- t_start = now;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+#endif
+
+ if (ms_user)
+ *ms_user = user;
+ if (ms_sys)
+ *ms_sys = sys;
- if (ms_user_diff != NULL)
- *ms_user_diff = total_user - prev_total_user;
-
- if (ms_sys_diff != NULL)
- *ms_sys_diff = total_sys - prev_total_sys;
+ if (ms_user_diff || ms_sys_diff) {
+
+ erts_smp_mtx_lock(&runtime_prev.data.mtx);
+
+ prev_user = runtime_prev.data.user;
+ prev_sys = runtime_prev.data.sys;
+ runtime_prev.data.user = user;
+ runtime_prev.data.sys = sys;
+
+ erts_smp_mtx_unlock(&runtime_prev.data.mtx);
+
+ if (ms_user_diff)
+ *ms_user_diff = user - prev_user;
+ if (ms_sys_diff)
+ *ms_sys_diff = sys - prev_sys;
+ }
}
/* wall clock routines */
void
-wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
+erts_wall_clock_elapsed_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_diff)
{
ErtsMonotonicTime now, elapsed;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
now = time_sup.r.o.get_time();
update_last_mtime(NULL, now);
elapsed = ERTS_MONOTONIC_TO_MSEC(now);
- *ms_total = (UWord) elapsed;
- *ms_diff = (UWord) (elapsed - prev_wall_clock_elapsed);
- prev_wall_clock_elapsed = elapsed;
+ elapsed -= ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN);
+
+ *ms_total = elapsed;
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ if (ms_diff) {
+ ErtsMonotonicTime prev;
+
+ prev = ((ErtsMonotonicTime)
+ erts_smp_atomic64_xchg_mb(&wall_clock_prev.time,
+ (erts_aint64_t) elapsed));
+
+ *ms_diff = elapsed - prev;
+ }
}
/* get current time */
@@ -1713,22 +1776,27 @@ univ_to_local(Sint *year, Sint *month, Sint *day,
void
get_now(Uint* megasec, Uint* sec, Uint* microsec)
{
- ErtsMonotonicTime now_megasec, now_sec, now, mtime, time_offset;
+ ErtsMonotonicTime now_megasec, now_sec, now, prev, mtime, time_offset;
mtime = time_sup.r.o.get_time();
time_offset = get_time_offset();
update_last_mtime(NULL, mtime);
now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset);
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
/* Make sure now time is later than last time */
- if (now <= previous_now)
- now = previous_now + 1;
-
- previous_now = now;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ prev = erts_smp_atomic64_read_nob(&now_prev.time);
+ while (1) {
+ ErtsMonotonicTime act;
+ if (now <= prev)
+ now = prev + 1;
+ act = ((ErtsMonotonicTime)
+ erts_smp_atomic64_cmpxchg_mb(&now_prev.time,
+ (erts_aint64_t) now,
+ (erts_aint64_t) prev));
+ if (act == prev)
+ break;
+ prev = act;
+ }
now_megasec = now / ERTS_MONOTONIC_TIME_TERA;
now_sec = now / ERTS_MONOTONIC_TIME_MEGA;
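
The get_now() rewrite above replaces the timeofday mutex with an atomic compare-and-swap loop that still hands out strictly increasing values. The same technique in isolation, written with C11 atomics instead of the erts_smp_atomic64 wrappers (an illustrative sketch, not the emulator's code):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int64_t prev_now;

    /* Publish a timestamp derived from `candidate` that is strictly greater
     * than anything published before; if another thread races ahead, retry
     * with its value as the new floor. */
    static int64_t monotone_now(int64_t candidate)
    {
        int64_t prev = atomic_load(&prev_now);

        for (;;) {
            if (candidate <= prev)
                candidate = prev + 1;
            /* On failure, compare_exchange reloads the current value into prev. */
            if (atomic_compare_exchange_strong(&prev_now, &prev, candidate))
                return candidate;
        }
    }
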
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 4b06c55770..db7d0ac449 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -336,7 +336,8 @@ void erts_init_trace(void) {
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers");
+ erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef HAVE_ERTS_NOW_CPU
erts_cpu_timestamp = 0;
@@ -2625,7 +2626,8 @@ init_sys_msg_dispatcher(void)
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
- erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
+ erts_smp_mtx_init(&smq_mtx, "sys_msg_q", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
@@ -3185,7 +3187,9 @@ static void init_tracer_nif()
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx");
+
+ erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_tracer_nif_clear();
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 07cf4f6903..3d28b05752 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -131,6 +131,7 @@ Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+#define erts_bld_monotonic_time erts_bld_sint64
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2)
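
erts_bld_monotonic_time is an alias for erts_bld_sint64, so it follows the usual two-pass erts_bld_* convention: one call with a size pointer to learn the heap need, one call with the heap pointer to build the term. A hedged sketch of that convention (assuming the standard hpp/szp protocol of the other builders; the helper name is made up):

    static Eterm build_time_term(Process *c_p, ErtsMonotonicTime t)
    {
        Uint sz = 0;
        Eterm *hp;

        (void) erts_bld_monotonic_time(NULL, &sz, t); /* pass 1: count words */
        hp = HAlloc(c_p, sz);                         /* allocate on the heap */
        return erts_bld_monotonic_time(&hp, NULL, t); /* pass 2: build term  */
    }
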
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 57f5ba5436..828c833ffc 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -182,7 +182,8 @@ init_export_table(void)
HashFunctions f;
int i;
- erts_smp_mtx_init(&export_staging_lock, "export_tab");
+ erts_smp_mtx_init(&export_staging_lock, "export_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_atomic_init_nob(&total_entries_bytes, 0);
f.hash = (H_FUN) export_hash;
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index fc95535ec3..182d3aa44e 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1183,7 +1183,8 @@ void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon);
Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_io_lock_count(int enable);
+void erts_lcnt_update_driver_locks(int enable);
+void erts_lcnt_update_port_locks(int enable);
#endif
/* driver_tab.c */
@@ -1287,7 +1288,8 @@ int erts_utf8_to_latin1(byte* dest, const byte* source, int slen);
void bin_write(fmtfn_t, void*, byte*, size_t);
Sint intlist_to_buf(Eterm, char*, Sint); /* most callers pass plain char*'s */
-Sint erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len);
+int erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written);
+Sint erts_unicode_list_to_buf_len(Eterm list);
struct Sint_buf {
#if defined(ARCH_64)
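
erts_lcnt_enable_io_lock_count() is split into separate driver-lock and port-lock updaters (their definitions are in the io.c hunks further down). A hypothetical caller that toggles both when lock counting for the io category changes:

    /* Hypothetical toggle; the two groups can also be updated independently. */
    static void set_io_lock_counting(int enable)
    {
        erts_lcnt_update_driver_locks(enable);
        erts_lcnt_update_port_locks(enable);
    }
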
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index d25e53ada0..3a5ddde5f4 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -52,6 +52,7 @@
#include "erl_bif_unique.h"
#include "erl_hl_timer.h"
#include "erl_time.h"
+#include "erl_io_queue.h"
extern ErlDrvEntry fd_driver_entry;
extern ErlDrvEntry vanilla_driver_entry;
@@ -108,7 +109,7 @@ static void driver_monitor_unlock_pdl(Port *p);
#define ERL_SMALL_IO_BIN_LIMIT (4*ERL_ONHEAP_BIN_LIMIT)
#define SMALL_WRITE_VEC 16
-static ERTS_INLINE ErlIOQueue*
+static ERTS_INLINE ErlPortIOQueue*
drvport2ioq(ErlDrvPort drvport)
{
Port *prt = erts_thr_drvport2port(drvport, 0);
@@ -123,11 +124,11 @@ is_port_ioq_empty(Port *pp)
int res;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
if (!pp->port_data_lock)
- res = (pp->ioq.size == 0);
+ res = (erts_ioq_size(&pp->ioq) == 0);
else {
ErlDrvPDL pdl = pp->port_data_lock;
erts_mtx_lock(&pdl->mtx);
- res = (pp->ioq.size == 0);
+ res = (erts_ioq_size(&pp->ioq) == 0);
erts_mtx_unlock(&pdl->mtx);
}
return res;
@@ -142,14 +143,14 @@ erts_is_port_ioq_empty(Port *pp)
Uint
erts_port_ioq_size(Port *pp)
{
- int res;
+ ErlDrvSizeT res;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
if (!pp->port_data_lock)
- res = pp->ioq.size;
+ res = erts_ioq_size(&pp->ioq);
else {
ErlDrvPDL pdl = pp->port_data_lock;
erts_mtx_lock(&pdl->mtx);
- res = pp->ioq.size;
+ res = erts_ioq_size(&pp->ioq);
erts_mtx_unlock(&pdl->mtx);
}
return (Uint) res;
@@ -258,14 +259,7 @@ static ERTS_INLINE void port_init_instr(Port *prt
#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
- char *lock_str = "port_lock";
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Uint16 opt = ((erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
- ? 0 : ERTS_LCNT_LT_DISABLE);
-#else
- Uint16 opt = 0;
-#endif
- erts_mtx_init_locked_x_opt(prt->lock, lock_str, id, opt);
+ erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
#endif
erts_port_task_init_sched(&prt->sched, id);
@@ -515,41 +509,17 @@ erts_port_free(Port *prt)
*/
static void initq(Port* prt)
{
- ErlIOQueue* q = &prt->ioq;
-
ERTS_LC_ASSERT(!prt->port_data_lock);
-
- q->size = 0;
- q->v_head = q->v_tail = q->v_start = q->v_small;
- q->v_end = q->v_small + SMALL_IO_QUEUE;
- q->b_head = q->b_tail = q->b_start = q->b_small;
- q->b_end = q->b_small + SMALL_IO_QUEUE;
+ erts_ioq_init(&prt->ioq, ERTS_ALC_T_IOQ, 1);
}
static void stopq(Port* prt)
{
- ErlIOQueue* q;
- ErlDrvBinary** binp;
if (prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
- q = &prt->ioq;
- binp = q->b_head;
-
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
-
- while(binp < q->b_tail) {
- if (*binp != NULL)
- driver_free_binary(*binp);
- binp++;
- }
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->v_start = q->v_end = q->v_head = q->v_tail = NULL;
- q->b_start = q->b_end = q->b_head = q->b_tail = NULL;
- q->size = 0;
+ erts_ioq_clear(&prt->ioq);
if (prt->port_data_lock) {
driver_pdl_unlock(prt->port_data_lock);
@@ -930,311 +900,6 @@ int erts_port_handle_xports(Port *prt)
}
#endif
-/* Fills a possibly deep list of chars and binaries into vec
-** Small characters are first stored in the buffer buf of length ln
-** binaries found are copied and linked into msoh
-** Return vector length on succsess,
-** -1 on overflow
-** -2 on type error
-*/
-
-#ifdef DEBUG
-#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))
-#else
-#define MAX_SYSIOVEC_IOVLEN (1ull << (sizeof(((SysIOVec*)0)->iov_len) * 8 - 1))
-#endif
-
-static ERTS_INLINE void
-io_list_to_vec_set_vec(SysIOVec **iov, ErlDrvBinary ***binv,
- ErlDrvBinary *bin, byte *ptr, Uint len,
- int *vlen)
-{
- while (len > MAX_SYSIOVEC_IOVLEN) {
- (*iov)->iov_base = ptr;
- (*iov)->iov_len = MAX_SYSIOVEC_IOVLEN;
- ptr += MAX_SYSIOVEC_IOVLEN;
- len -= MAX_SYSIOVEC_IOVLEN;
- (*iov)++;
- (*vlen)++;
- *(*binv)++ = bin;
- }
- (*iov)->iov_base = ptr;
- (*iov)->iov_len = len;
- *(*binv)++ = bin;
- (*iov)++;
- (*vlen)++;
-}
-
-static int
-io_list_to_vec(Eterm obj, /* io-list */
- SysIOVec* iov, /* io vector */
- ErlDrvBinary** binv, /* binary reference vector */
- ErlDrvBinary* cbin, /* binary to store characters */
- ErlDrvSizeT bin_limit) /* small binaries limit */
-{
- DECLARE_ESTACK(s);
- Eterm* objp;
- byte *buf = (byte*)cbin->orig_bytes;
- Uint len = cbin->orig_size;
- Uint csize = 0;
- int vlen = 0;
- byte* cptr = buf;
-
- goto L_jump_start; /* avoid push */
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_jump_start:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- if (len == 0)
- goto L_overflow;
- *buf++ = unsigned_val(obj);
- csize++;
- len--;
- } else if (is_binary(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto handle_binary;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) {
- goto handle_binary;
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- } else if (is_binary(obj)) {
- Eterm real_bin;
- Uint offset;
- Eterm* bptr;
- ErlDrvSizeT size;
- int bitoffs;
- int bitsize;
-
- handle_binary:
- size = binary_size(obj);
- ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
- ASSERT(bitsize == 0);
- bptr = binary_val(real_bin);
- if (*bptr == HEADER_PROC_BIN) {
- ProcBin* pb = (ProcBin *) bptr;
- if (bitoffs != 0) {
- if (len < size) {
- goto L_overflow;
- }
- erts_copy_bits(pb->bytes+offset, bitoffs, 1,
- (byte *) buf, 0, 1, size*8);
- csize += size;
- buf += size;
- len -= size;
- } else if (bin_limit && size < bin_limit) {
- if (len < size) {
- goto L_overflow;
- }
- sys_memcpy(buf, pb->bytes+offset, size);
- csize += size;
- buf += size;
- len -= size;
- } else {
- if (csize != 0) {
- io_list_to_vec_set_vec(&iov, &binv, cbin,
- cptr, csize, &vlen);
- cptr = buf;
- csize = 0;
- }
- if (pb->flags) {
- erts_emasculate_writable_binary(pb);
- }
- io_list_to_vec_set_vec(
- &iov, &binv, Binary2ErlDrvBinary(pb->val),
- pb->bytes+offset, size, &vlen);
- }
- } else {
- ErlHeapBin* hb = (ErlHeapBin *) bptr;
- if (len < size) {
- goto L_overflow;
- }
- copy_binary_to_buffer(buf, 0,
- ((byte *) hb->data)+offset, bitoffs,
- 8*size);
- csize += size;
- buf += size;
- len -= size;
- }
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
-
- if (csize != 0) {
- io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
- }
-
- DESTROY_ESTACK(s);
- return vlen;
-
- L_type_error:
- DESTROY_ESTACK(s);
- return -2;
-
- L_overflow:
- DESTROY_ESTACK(s);
- return -1;
-}
-
-#define IO_LIST_VEC_COUNT(obj) \
-do { \
- Uint _size = binary_size(obj); \
- Eterm _real; \
- ERTS_DECLARE_DUMMY(Uint _offset); \
- int _bitoffs; \
- int _bitsize; \
- ERTS_GET_REAL_BIN(obj, _real, _offset, _bitoffs, _bitsize); \
- if (_bitsize != 0) goto L_type_error; \
- if (thing_subtag(*binary_val(_real)) == REFC_BINARY_SUBTAG && \
- _bitoffs == 0) { \
- b_size += _size; \
- if (b_size < _size) goto L_overflow_error; \
- in_clist = 0; \
- v_size++; \
- /* If iov_len is smaller then Uint we split the binary into*/ \
- /* multiple smaller (2GB) elements in the iolist.*/ \
- v_size += _size / MAX_SYSIOVEC_IOVLEN; \
- if (_size >= ERL_SMALL_IO_BIN_LIMIT) { \
- p_in_clist = 0; \
- p_v_size++; \
- } else { \
- p_c_size += _size; \
- if (!p_in_clist) { \
- p_in_clist = 1; \
- p_v_size++; \
- } \
- } \
- } else { \
- c_size += _size; \
- if (c_size < _size) goto L_overflow_error; \
- if (!in_clist) { \
- in_clist = 1; \
- v_size++; \
- } \
- p_c_size += _size; \
- if (!p_in_clist) { \
- p_in_clist = 1; \
- p_v_size++; \
- } \
- } \
-} while (0)
-
-
-/*
- * Returns 0 if successful and a non-zero value otherwise.
- *
- * Return values through pointers:
- * *vsize - SysIOVec size needed for a writev
- * *csize - Number of bytes not in binary (in the common binary)
- * *pvsize - SysIOVec size needed if packing small binaries
- * *pcsize - Number of bytes in the common binary if packing
- * *total_size - Total size of iolist in bytes
- */
-
-static int
-io_list_vec_len(Eterm obj, int* vsize, Uint* csize,
- Uint* pvsize, Uint* pcsize,
- ErlDrvSizeT* total_size)
-{
- DECLARE_ESTACK(s);
- Eterm* objp;
- Uint v_size = 0;
- Uint c_size = 0;
- Uint b_size = 0;
- Uint in_clist = 0;
- Uint p_v_size = 0;
- Uint p_c_size = 0;
- Uint p_in_clist = 0;
- Uint total;
-
- goto L_jump_start; /* avoid a push */
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_jump_start:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
-
- if (is_byte(obj)) {
- c_size++;
- if (c_size == 0) {
- goto L_overflow_error;
- }
- if (!in_clist) {
- in_clist = 1;
- v_size++;
- }
- p_c_size++;
- if (!p_in_clist) {
- p_in_clist = 1;
- p_v_size++;
- }
- }
- else if (is_binary(obj)) {
- IO_LIST_VEC_COUNT(obj);
- }
- else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
-
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) { /* binary tail is OK */
- IO_LIST_VEC_COUNT(obj);
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
- else if (is_binary(obj)) {
- IO_LIST_VEC_COUNT(obj);
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
-
- total = c_size + b_size;
- if (total < c_size) {
- goto L_overflow_error;
- }
- *total_size = (ErlDrvSizeT) total;
-
- DESTROY_ESTACK(s);
- *vsize = v_size;
- *csize = c_size;
- *pvsize = p_v_size;
- *pcsize = p_c_size;
- return 0;
-
- L_type_error:
- L_overflow_error:
- DESTROY_ESTACK(s);
- return 1;
-}
-
typedef enum {
ERTS_TRY_IMM_DRV_CALL_OK,
ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK,
@@ -1804,8 +1469,7 @@ cleanup_scheduled_outputv(ErlIOVec *ev, ErlDrvBinary *cbinp)
int i;
/* Need to free all binaries */
for (i = 1; i < ev->vsize; i++)
- if (ev->binv[i])
- driver_free_binary(ev->binv[i]);
+ driver_free_binary(ev->binv[i]);
if (cbinp)
driver_free_binary(cbinp);
}
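
The large deletion above removes io_list_to_vec()/io_list_vec_len() from io.c; the outputv paths below call the shared erl_io_queue implementation instead. Condensed, the flow used by erts_port_output_async() below is a size pass followed by a flatten pass (sketch only; the real code sizes the iov/binv vectors from vsize and handles allocation failure):

    /* Illustrative fragment wrapped in a helper; `iov` and `binv` stand for the
     * caller's vectors, whose element 0 is reserved for a driver header block. */
    static int iodata_to_iovec(Eterm list, SysIOVec *iov, ErtsIOQBinary **binv)
    {
        int vsize;
        Uint csize, pvsize, pcsize, blimit = 0;
        size_t size;
        ErtsIOQBinary *cbin = NULL;

        if (erts_ioq_iodata_vec_len(list, &vsize, &csize, &pvsize, &pcsize,
                                    &size, ERL_SMALL_IO_BIN_LIMIT))
            return -1;                       /* not a valid io-list */

        if (vsize >= SMALL_WRITE_VEC) {      /* pack small binaries */
            vsize = pvsize + 1;
            csize = pcsize;
            blimit = ERL_SMALL_IO_BIN_LIMIT;
        }
        if (csize)
            cbin = (ErtsIOQBinary *) driver_alloc_binary(csize);

        /* Returns the vector length, or a negative value on overflow/type error. */
        return erts_ioq_iodata_to_vec(list, iov + 1, binv + 1, cbin, blimit, 1);
    }
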
@@ -1968,21 +1632,19 @@ int
erts_port_output_async(Port *prt, Eterm from, Eterm list)
{
- ErtsPortOpResult res;
ErtsProc2PortSigData *sigdp;
erts_driver_t *drv = prt->drv_ptr;
size_t size;
int task_flags;
ErtsProc2PortSigCallback port_sig_callback;
- ErlDrvBinary *cbin = NULL;
- ErlIOVec *evp = NULL;
+ ErtsIOQBinary *cbin = NULL;
+ ErtsIOVec *evp = NULL;
char *buf = NULL;
ErtsPortTaskHandle *ns_pthp;
if (drv->outputv) {
- ErlIOVec ev;
SysIOVec* ivp;
- ErlDrvBinary** bvp;
+ ErtsIOQBinary** bvp;
int vsize;
Uint csize;
Uint pvsize;
@@ -1992,91 +1654,63 @@ erts_port_output_async(Port *prt, Eterm from, Eterm list)
char *ptr;
int i;
- Eterm* bptr = NULL;
- Uint offset;
-
- if (is_binary(list)) {
- /* We optimize for when we get a procbin without offset */
- Eterm real_bin;
- int bitoffs;
- int bitsize;
- ERTS_GET_REAL_BIN(list, real_bin, offset, bitoffs, bitsize);
- bptr = binary_val(real_bin);
- if (*bptr == HEADER_PROC_BIN && bitoffs == 0) {
- size = binary_size(list);
- vsize = 1;
- } else
- bptr = NULL;
- }
-
- if (!bptr) {
- if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size))
- goto bad_value;
+ if (erts_ioq_iodata_vec_len(list, &vsize, &csize, &pvsize, &pcsize,
+ &size, ERL_SMALL_IO_BIN_LIMIT))
+ goto bad_value;
- /* To pack or not to pack (small binaries) ...? */
- if (vsize >= SMALL_WRITE_VEC) {
- /* Do pack */
- vsize = pvsize + 1;
- csize = pcsize;
- blimit = ERL_SMALL_IO_BIN_LIMIT;
- }
- cbin = driver_alloc_binary(csize);
+ /* To pack or not to pack (small binaries) ...? */
+ if (vsize >= SMALL_WRITE_VEC) {
+ /* Do pack */
+ vsize = pvsize + 1;
+ csize = pcsize;
+ blimit = ERL_SMALL_IO_BIN_LIMIT;
+ }
+ if (csize) {
+ cbin = (ErtsIOQBinary *)driver_alloc_binary(csize);
if (!cbin)
erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
}
-
iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
binv_offset = iov_offset;
binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
alloc_size = binv_offset;
- alloc_size += (vsize+1)*sizeof(ErlDrvBinary *);
+ alloc_size += (vsize+1)*sizeof(ErtsIOQBinary *);
sigdp = erts_port_task_alloc_p2p_sig_data_extra(alloc_size, (void**)&ptr);
- evp = (ErlIOVec *) ptr;
- ivp = evp->iov = (SysIOVec *) (ptr + iov_offset);
- bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset);
+ evp = (ErtsIOVec *) ptr;
+ ivp = evp->driver.iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->common.binv = (ErtsIOQBinary **) (ptr + binv_offset);
ivp[0].iov_base = NULL;
ivp[0].iov_len = 0;
bvp[0] = NULL;
- if (bptr) {
- ProcBin* pb = (ProcBin *) bptr;
-
- ivp[1].iov_base = pb->bytes+offset;
- ivp[1].iov_len = size;
- bvp[1] = Binary2ErlDrvBinary(pb->val);
-
- evp->vsize = 1;
- } else {
-
- evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
- if (evp->vsize < 0) {
- if (evp != &ev)
- erts_free(ERTS_ALC_T_DRV_CMD_DATA, evp);
- driver_free_binary(cbin);
- goto bad_value;
- }
+ evp->driver.vsize = erts_ioq_iodata_to_vec(list, ivp+1, bvp+1, cbin,
+ blimit, 1);
+ if (evp->driver.vsize < 0) {
+ erts_free(ERTS_ALC_T_DRV_CMD_DATA, evp);
+ driver_free_binary(&cbin->driver);
+ goto bad_value;
}
#if 0
/* This assertion may say something useful, but it can
be falsified during the emulator test suites. */
ASSERT(evp->vsize == vsize);
#endif
- evp->vsize++;
- evp->size = size; /* total size */
+ evp->driver.vsize++;
+ evp->driver.size = size; /* total size */
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
+ for (i = 1; i < evp->driver.vsize; i++)
if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ driver_binary_inc_refc(&bvp[i]->driver);
sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
sigdp->u.outputv.from = from;
- sigdp->u.outputv.evp = evp;
- sigdp->u.outputv.cbinp = cbin;
+ sigdp->u.outputv.evp = &evp->driver;
+ sigdp->u.outputv.cbinp = &cbin->driver;
port_sig_callback = port_sig_outputv;
} else {
ErlDrvSizeT ERTS_DECLARE_DUMMY(r);
@@ -2102,26 +1736,18 @@ erts_port_output_async(Port *prt, Eterm from, Eterm list)
sigdp->u.output.size = size;
port_sig_callback = port_sig_output;
}
- sigdp->flags = 0;
ns_pthp = NULL;
task_flags = 0;
- res = erts_schedule_proc2port_signal(NULL,
- prt,
- ERTS_INVALID_PID,
- NULL,
- sigdp,
- task_flags,
- ns_pthp,
- port_sig_callback);
+ erts_schedule_proc2port_signal(NULL,
+ prt,
+ ERTS_INVALID_PID,
+ NULL,
+ sigdp,
+ task_flags,
+ ns_pthp,
+ port_sig_callback);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
- return 1;
- }
return 1;
bad_value:
@@ -2155,8 +1781,8 @@ erts_port_output(Process *c_p,
erts_aint32_t sched_flags, busy_flgs, invalid_flags;
int task_flags;
ErtsProc2PortSigCallback port_sig_callback;
- ErlDrvBinary *cbin = NULL;
- ErlIOVec *evp = NULL;
+ ErtsIOQBinary *cbin = NULL;
+ ErtsIOVec *evp = NULL;
char *buf = NULL;
int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL);
int async_nosuspend;
@@ -2202,11 +1828,11 @@ erts_port_output(Process *c_p,
}
#endif
if (drv->outputv) {
- ErlIOVec ev;
+ ErtsIOVec ev;
SysIOVec iv[SMALL_WRITE_VEC];
- ErlDrvBinary* bv[SMALL_WRITE_VEC];
+ ErtsIOQBinary* bv[SMALL_WRITE_VEC];
SysIOVec* ivp;
- ErlDrvBinary** bvp;
+ ErtsIOQBinary** bvp;
int vsize;
Uint csize;
Uint pvsize;
@@ -2214,18 +1840,19 @@ erts_port_output(Process *c_p,
Uint blimit;
size_t iov_offset, binv_offset, alloc_size;
- if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size))
+ if (erts_ioq_iodata_vec_len(list, &vsize, &csize, &pvsize, &pcsize,
+ &size, ERL_SMALL_IO_BIN_LIMIT))
goto bad_value;
iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
binv_offset = iov_offset;
binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
alloc_size = binv_offset;
- alloc_size += (vsize+1)*sizeof(ErlDrvBinary *);
+ alloc_size += (vsize+1)*sizeof(ErtsIOQBinary *);
if (try_call && vsize < SMALL_WRITE_VEC) {
- ivp = ev.iov = iv;
- bvp = ev.binv = bv;
+ ivp = ev.common.iov = iv;
+ bvp = ev.common.binv = bv;
evp = &ev;
}
else {
@@ -2236,9 +1863,9 @@ erts_port_output(Process *c_p,
sigdp = erts_port_task_alloc_p2p_sig_data_extra(
alloc_size, (void**)&ptr);
}
- evp = (ErlIOVec *) ptr;
- ivp = evp->iov = (SysIOVec *) (ptr + iov_offset);
- bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset);
+ evp = (ErtsIOVec *) ptr;
+ ivp = evp->driver.iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->common.binv = (ErtsIOQBinary **) (ptr + binv_offset);
}
/* To pack or not to pack (small binaries) ...? */
@@ -2254,23 +1881,26 @@ erts_port_output(Process *c_p,
}
/* Use vsize and csize from now on */
- cbin = driver_alloc_binary(csize);
- if (!cbin)
- erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
+ if (csize) {
+ cbin = (ErtsIOQBinary *)driver_alloc_binary(csize);
+ if (!cbin)
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
+ }
/* Element 0 is for driver usage to add header block */
ivp[0].iov_base = NULL;
ivp[0].iov_len = 0;
bvp[0] = NULL;
- evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
- if (evp->vsize < 0) {
+ evp->driver.vsize = erts_ioq_iodata_to_vec(list, ivp+1, bvp+1,
+ cbin, blimit, 1);
+ if (evp->driver.vsize < 0) {
if (evp != &ev) {
if (try_call)
erts_free(ERTS_ALC_T_TMP, evp);
else
erts_port_task_free_p2p_sig_data(sigdp);
}
- driver_free_binary(cbin);
+ driver_free_binary(&cbin->driver);
goto bad_value;
}
#if 0
@@ -2278,19 +1908,19 @@ erts_port_output(Process *c_p,
be falsified during the emulator test suites. */
ASSERT(evp->vsize == vsize);
#endif
- evp->vsize++;
- evp->size = size; /* total size */
+ evp->driver.vsize++;
+ evp->driver.size = size; /* total size */
if (!try_call) {
int i;
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
- if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ for (i = 1; i < evp->driver.vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(&bvp[i]->driver);
}
else {
int i;
- ErlIOVec *new_evp;
+ ErtsIOVec *new_evp;
ErtsTryImmDrvCallResult try_call_res;
ErtsTryImmDrvCallState try_call_state
= ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
@@ -2313,14 +1943,14 @@ erts_port_output(Process *c_p,
from,
prt,
drv,
- evp);
+ &evp->driver);
if (force_immediate_call)
finalize_force_imm_drv_call(&try_call_state);
else
finalize_imm_drv_call(&try_call_state);
/* Fall through... */
case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
- driver_free_binary(cbin);
+ driver_free_binary(&cbin->driver);
if (evp != &ev) {
ASSERT(!sigdp);
erts_free(ERTS_ALC_T_TMP, evp);
@@ -2334,7 +1964,7 @@ erts_port_output(Process *c_p,
sched_flags = try_call_state.sched_flags;
if (async_nosuspend
&& (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))) {
- driver_free_binary(cbin);
+ driver_free_binary(&cbin->driver);
if (evp != &ev) {
ASSERT(!sigdp);
erts_free(ERTS_ALC_T_TMP, evp);
@@ -2349,9 +1979,9 @@ erts_port_output(Process *c_p,
}
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
+ for (i = 1; i < evp->driver.vsize; i++)
if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ driver_binary_inc_refc(&bvp[i]->driver);
/* The port task and iovec is allocated in the
same structure as an optimization. This
@@ -2364,18 +1994,18 @@ erts_port_output(Process *c_p,
if (evp != &ev) {
/* Copy from TMP alloc to port task */
sys_memcpy((void *) new_evp, (void *) evp, alloc_size);
- new_evp->iov = (SysIOVec *) (((char *) new_evp)
- + iov_offset);
- bvp = new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
- + binv_offset);
+ new_evp->driver.iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ bvp = new_evp->common.binv = (ErtsIOQBinary **) (((char *) new_evp)
+ + binv_offset);
#ifdef DEBUG
- ASSERT(new_evp->vsize == evp->vsize);
- ASSERT(new_evp->size == evp->size);
- for (i = 0; i < evp->vsize; i++) {
- ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
- ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
- ASSERT(new_evp->binv[i] == evp->binv[i]);
+ ASSERT(new_evp->driver.vsize == evp->driver.vsize);
+ ASSERT(new_evp->driver.size == evp->driver.size);
+ for (i = 0; i < evp->driver.vsize; i++) {
+ ASSERT(new_evp->driver.iov[i].iov_len == evp->driver.iov[i].iov_len);
+ ASSERT(new_evp->driver.iov[i].iov_base == evp->driver.iov[i].iov_base);
+ ASSERT(new_evp->driver.binv[i] == evp->driver.binv[i]);
}
#endif
@@ -2384,24 +2014,24 @@ erts_port_output(Process *c_p,
else { /* from stack allocated structure; offsets may differ */
sys_memcpy((void *) new_evp, (void *) evp, sizeof(ErlIOVec));
- new_evp->iov = (SysIOVec *) (((char *) new_evp)
- + iov_offset);
- sys_memcpy((void *) new_evp->iov,
- (void *) evp->iov,
- evp->vsize * sizeof(SysIOVec));
- new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
- + binv_offset);
- sys_memcpy((void *) new_evp->binv,
- (void *) evp->binv,
- evp->vsize * sizeof(ErlDrvBinary *));
+ new_evp->driver.iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ sys_memcpy((void *) new_evp->driver.iov,
+ (void *) evp->driver.iov,
+ evp->driver.vsize * sizeof(SysIOVec));
+ new_evp->common.binv = (ErtsIOQBinary **) (((char *) new_evp)
+ + binv_offset);
+ sys_memcpy((void *) new_evp->common.binv,
+ (void *) evp->common.binv,
+ evp->driver.vsize * sizeof(ErtsIOQBinary *));
#ifdef DEBUG
- ASSERT(new_evp->vsize == evp->vsize);
- ASSERT(new_evp->size == evp->size);
- for (i = 0; i < evp->vsize; i++) {
- ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
- ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
- ASSERT(new_evp->binv[i] == evp->binv[i]);
+ ASSERT(new_evp->driver.vsize == evp->driver.vsize);
+ ASSERT(new_evp->driver.size == evp->driver.size);
+ for (i = 0; i < evp->driver.vsize; i++) {
+ ASSERT(new_evp->driver.iov[i].iov_len == evp->driver.iov[i].iov_len);
+ ASSERT(new_evp->driver.iov[i].iov_base == evp->driver.iov[i].iov_base);
+ ASSERT(new_evp->driver.binv[i] == evp->driver.binv[i]);
}
#endif
@@ -2412,8 +2042,8 @@ erts_port_output(Process *c_p,
sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
sigdp->u.outputv.from = from;
- sigdp->u.outputv.evp = evp;
- sigdp->u.outputv.cbinp = cbin;
+ sigdp->u.outputv.evp = &evp->driver;
+ sigdp->u.outputv.cbinp = &cbin->driver;
port_sig_callback = port_sig_outputv;
}
else {
@@ -2554,10 +2184,6 @@ erts_port_output(Process *c_p,
port_sig_callback);
if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
return res;
}
@@ -2736,21 +2362,14 @@ erts_port_exit(Process *c_p,
&bp->off_heap);
}
- res = erts_schedule_proc2port_signal(c_p,
- prt,
- c_p ? c_p->common.id : from,
- refp,
- sigdp,
- 0,
- NULL,
- port_sig_exit);
-
- if (res == ERTS_PORT_OP_DROPPED) {
- if (bp)
- free_message_buffer(bp);
- }
-
- return res;
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ NULL,
+ port_sig_exit);
}
static ErtsPortOpResult
@@ -3419,9 +3038,8 @@ void erts_init_io(int port_tab_size,
else if (port_tab_size < ERTS_MIN_PORTS)
port_tab_size = ERTS_MIN_PORTS;
- erts_smp_rwmtx_init_opt(&erts_driver_list_lock,
- &drv_list_rwmtx_opts,
- "driver_list");
+ erts_smp_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
driver_list = NULL;
erts_smp_tsd_key_create(&driver_list_lock_status_key,
"erts_driver_list_lock_status_key");
@@ -3458,67 +3076,94 @@ void erts_init_io(int port_tab_size,
}
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
-static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable)
{
if (dp->lock) {
- if (enable)
- erts_lcnt_init_lock_x(&dp->lock->lcnt,
- "driver_lock",
- ERTS_LCNT_LT_MUTEX,
- erts_atom_put((byte*)dp->name,
- sys_strlen(dp->name),
- ERTS_ATOM_ENC_LATIN1,
- 1));
-
- else
- erts_lcnt_destroy_lock(&dp->lock->lcnt);
+ if (enable) {
+ Eterm name_as_atom = erts_atom_put((byte*)dp->name, sys_strlen(dp->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+ erts_lcnt_install_new_lock_info(&dp->lock->lcnt, "driver_lock", name_as_atom,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_uninstall(&dp->lock->lcnt);
+ }
}
}
-static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+static void lcnt_enable_port_lock_count(Port *prt, int enable)
{
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
- if (!enable) {
- erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_destroy_lock(&prt->lock->lcnt);
+
+ if(enable) {
+ ErlDrvPDL pdl = prt->port_data_lock;
+
+ erts_lcnt_install_new_lock_info(&prt->sched.mtx.lcnt, "port_sched_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+
+ if(pdl) {
+ erts_lcnt_install_new_lock_info(&pdl->mtx.lcnt, "port_data_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_install_new_lock_info(&prt->lock->lcnt, "port_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+ } else {
+ erts_lcnt_uninstall(&prt->sched.mtx.lcnt);
+
+ if(prt->port_data_lock) {
+ erts_lcnt_uninstall(&prt->port_data_lock->mtx.lcnt);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_uninstall(&prt->lock->lcnt);
+ }
}
- else {
- erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
- "port_sched_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_init_lock_x(&prt->lock->lcnt,
- "port_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
+}
+
+void erts_lcnt_update_driver_locks(int enable) {
+ erts_driver_t *driver;
+
+ lcnt_enable_driver_lock_count(&vanilla_driver, enable);
+ lcnt_enable_driver_lock_count(&spawn_driver, enable);
+#ifndef __WIN32__
+ lcnt_enable_driver_lock_count(&forker_driver, enable);
+#endif
+ lcnt_enable_driver_lock_count(&fd_driver, enable);
+
+ erts_rwmtx_rlock(&erts_driver_list_lock);
+
+ for (driver = driver_list; driver; driver = driver->next) {
+ lcnt_enable_driver_lock_count(driver, enable);
}
+
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
-void erts_lcnt_enable_io_lock_count(int enable) {
- erts_driver_t *dp;
- int ix, max = erts_ptab_max(&erts_port);
- Port *prt;
+void erts_lcnt_update_port_locks(int enable) {
+ int i, max;
- for (ix = 0; ix < max; ix++) {
- if ((prt = erts_pix2port(ix)) != NULL) {
- lcnt_enable_port_lock_count(prt, enable);
+ max = erts_ptab_max(&erts_port);
+
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Port *port;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ port = erts_pix2port(i);
+
+ if(port != NULL) {
+ lcnt_enable_port_lock_count(port, enable);
}
- } /* for all ports */
- lcnt_enable_drv_lock_count(&vanilla_driver, enable);
- lcnt_enable_drv_lock_count(&spawn_driver, enable);
-#ifndef __WIN32__
- lcnt_enable_drv_lock_count(&forker_driver, enable);
-#endif
- lcnt_enable_drv_lock_count(&fd_driver, enable);
- /* enable lock counting in all drivers */
- for (dp = driver_list; dp; dp = dp->next) {
- lcnt_enable_drv_lock_count(dp, enable);
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
}
-} /* enable/disable lock counting of ports */
+}
+
#endif /* defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) */
/*
* Buffering of data when using line oriented I/O on ports
@@ -3701,7 +3346,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
ERTS_SMP_CHK_NO_PROC_LOCKS;
ASSERT(!prt || prt->common.id == sender);
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ASSERT(!prt || erts_lc_is_port_locked(prt));
#endif
@@ -4930,10 +4575,9 @@ erts_port_control(Process* c_p,
0,
NULL,
port_sig_control);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_control(binp, bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -5223,10 +4867,9 @@ erts_port_call(Process* c_p,
0,
NULL,
port_sig_call);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_call(bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -7093,7 +6736,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id);
+ erts_mtx_init(&pdl->mtx, "port_data_lock", pp->common.id, ERTS_LOCK_FLAGS_CATEGORY_IO);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -7157,307 +6800,51 @@ driver_pdl_dec_refc(ErlDrvPDL pdl)
return refc;
}
-/* expand queue to hold n elements in tail or head */
-static int expandq(ErlIOQueue* q, int n, int tail)
-/* tail: 0 if make room in head, make room in tail otherwise */
-{
- int h_sz; /* room before header */
- int t_sz; /* room after tail */
- int q_sz; /* occupied */
- int nvsz;
- SysIOVec* niov;
- ErlDrvBinary** nbinv;
-
- h_sz = q->v_head - q->v_start;
- t_sz = q->v_end - q->v_tail;
- q_sz = q->v_tail - q->v_head;
-
- if (tail && (n <= t_sz)) /* do we need to expand tail? */
- return 0;
- else if (!tail && (n <= h_sz)) /* do we need to expand head? */
- return 0;
- else if (n > (h_sz + t_sz)) { /* need to allocate */
- /* we may get little extra but it ok */
- nvsz = (q->v_end - q->v_start) + n;
-
- niov = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(SysIOVec));
- if (!niov)
- return -1;
- nbinv = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(ErlDrvBinary**));
- if (!nbinv) {
- erts_free(ERTS_ALC_T_IOQ, (void *) niov);
- return -1;
- }
- if (tail) {
- sys_memcpy(niov, q->v_head, q_sz*sizeof(SysIOVec));
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
- q->v_start = niov;
- q->v_end = niov + nvsz;
- q->v_head = q->v_start;
- q->v_tail = q->v_head + q_sz;
-
- sys_memcpy(nbinv, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->b_start = nbinv;
- q->b_end = nbinv + nvsz;
- q->b_head = q->b_start;
- q->b_tail = q->b_head + q_sz;
- }
- else {
- sys_memcpy(niov+nvsz-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
- q->v_start = niov;
- q->v_end = niov + nvsz;
- q->v_tail = q->v_end;
- q->v_head = q->v_tail - q_sz;
-
- sys_memcpy(nbinv+nvsz-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->b_start = nbinv;
- q->b_end = nbinv + nvsz;
- q->b_tail = q->b_end;
- q->b_head = q->b_tail - q_sz;
- }
- }
- else if (tail) { /* move to beginning to make room in tail */
- sys_memmove(q->v_start, q->v_head, q_sz*sizeof(SysIOVec));
- q->v_head = q->v_start;
- q->v_tail = q->v_head + q_sz;
- sys_memmove(q->b_start, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- q->b_head = q->b_start;
- q->b_tail = q->b_head + q_sz;
- }
- else { /* move to end to make room */
- sys_memmove(q->v_end-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
- q->v_tail = q->v_end;
- q->v_head = q->v_tail-q_sz;
- sys_memmove(q->b_end-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- q->b_tail = q->b_end;
- q->b_head = q->b_tail-q_sz;
- }
-
- return 0;
-}
-
-
-
/* Put elements from vec at q tail */
int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
- int n;
- size_t len;
- ErlDrvSizeT size;
- SysIOVec* iov;
- ErlDrvBinary** binv;
- ErlDrvBinary* b;
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL)
- return -1;
-
- ASSERT(vec->size >= skip); /* debug only */
- if (vec->size <= skip)
- return 0;
- size = vec->size - skip;
-
- iov = vec->iov;
- binv = vec->binv;
- n = vec->vsize;
-
- /* we use do here to strip iov_len=0 from beginning */
- do {
- len = iov->iov_len;
- if (len <= skip) {
- skip -= len;
- iov++;
- binv++;
- n--;
- }
- else {
- iov->iov_base = ((char *)(iov->iov_base)) + skip;
- iov->iov_len -= skip;
- skip = 0;
- }
- } while(skip > 0);
-
- if (q->v_tail + n >= q->v_end)
- expandq(q, n, 1);
-
- /* Queue and reference all binaries (remove zero length items) */
- while(n--) {
- if ((len = iov->iov_len) > 0) {
- if ((b = *binv) == NULL) { /* speical case create binary ! */
- b = driver_alloc_binary(len);
- sys_memcpy(b->orig_bytes, iov->iov_base, len);
- *q->b_tail++ = b;
- q->v_tail->iov_len = len;
- q->v_tail->iov_base = b->orig_bytes;
- q->v_tail++;
- }
- else {
- driver_binary_inc_refc(b);
- *q->b_tail++ = b;
- *q->v_tail++ = *iov;
- }
- }
- iov++;
- binv++;
- }
- q->size += size; /* update total size in queue */
- return 0;
+ ASSERT(vec->size >= skip);
+ return erts_ioq_enqv(drvport2ioq(ix), (ErtsIOVec*)vec, skip);
}
/* Put elements from vec at q head */
int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
- int n;
- size_t len;
- ErlDrvSizeT size;
- SysIOVec* iov;
- ErlDrvBinary** binv;
- ErlDrvBinary* b;
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL)
- return -1;
-
- if (vec->size <= skip)
- return 0;
- size = vec->size - skip;
-
- iov = vec->iov;
- binv = vec->binv;
- n = vec->vsize;
-
- /* we use do here to strip iov_len=0 from beginning */
- do {
- len = iov->iov_len;
- if (len <= skip) {
- skip -= len;
- iov++;
- binv++;
- n--;
- }
- else {
- iov->iov_base = ((char *)(iov->iov_base)) + skip;
- iov->iov_len -= skip;
- skip = 0;
- }
- } while(skip > 0);
-
- if (q->v_head - n < q->v_start)
- expandq(q, n, 0);
-
- /* Queue and reference all binaries (remove zero length items) */
- iov += (n-1); /* move to end */
- binv += (n-1); /* move to end */
- while(n--) {
- if ((len = iov->iov_len) > 0) {
- if ((b = *binv) == NULL) { /* speical case create binary ! */
- b = driver_alloc_binary(len);
- sys_memcpy(b->orig_bytes, iov->iov_base, len);
- *--q->b_head = b;
- q->v_head--;
- q->v_head->iov_len = len;
- q->v_head->iov_base = b->orig_bytes;
- }
- else {
- driver_binary_inc_refc(b);
- *--q->b_head = b;
- *--q->v_head = *iov;
- }
- }
- iov--;
- binv--;
- }
- q->size += size; /* update total size in queue */
- return 0;
+ ASSERT(vec->size >= skip);
+ return erts_ioq_pushqv(drvport2ioq(ix), (ErtsIOVec*)vec, skip);
}
-
/*
** Remove size bytes from queue head
** Return number of bytes that remain in queue
*/
ErlDrvSizeT driver_deq(ErlDrvPort ix, ErlDrvSizeT size)
{
- ErlIOQueue* q = drvport2ioq(ix);
- ErlDrvSizeT len;
-
- if ((q == NULL) || (q->size < size))
- return -1;
- q->size -= size;
- while (size > 0) {
- ASSERT(q->v_head != q->v_tail);
-
- len = q->v_head->iov_len;
- if (len <= size) {
- size -= len;
- driver_free_binary(*q->b_head);
- *q->b_head++ = NULL;
- q->v_head++;
- }
- else {
- q->v_head->iov_base = ((char *)(q->v_head->iov_base)) + size;
- q->v_head->iov_len -= size;
- size = 0;
- }
- }
-
- /* restart pointers (optimised for enq) */
- if (q->v_head == q->v_tail) {
- q->v_head = q->v_tail = q->v_start;
- q->b_head = q->b_tail = q->b_start;
- }
- return q->size;
+ ErlPortIOQueue *q = drvport2ioq(ix);
+ if (erts_ioq_deq(q, size) == -1)
+ return -1;
+ return erts_ioq_size(q);
}
-ErlDrvSizeT driver_peekqv(ErlDrvPort ix, ErlIOVec *ev) {
- ErlIOQueue *q = drvport2ioq(ix);
- ASSERT(ev);
-
- if (! q) {
- return (ErlDrvSizeT) -1;
- } else {
- if ((ev->vsize = q->v_tail - q->v_head) == 0) {
- ev->size = 0;
- ev->iov = NULL;
- ev->binv = NULL;
- } else {
- ev->size = q->size;
- ev->iov = q->v_head;
- ev->binv = q->b_head;
- }
- return q->size;
- }
+ErlDrvSizeT driver_peekqv(ErlDrvPort ix, ErlIOVec *ev)
+{
+ return erts_ioq_peekqv(drvport2ioq(ix), (ErtsIOVec*)ev);
}
SysIOVec* driver_peekq(ErlDrvPort ix, int* vlenp) /* length of io-vector */
{
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL) {
- *vlenp = -1;
- return NULL;
- }
- if ((*vlenp = (q->v_tail - q->v_head)) == 0)
- return NULL;
- return q->v_head;
+ return erts_ioq_peekq(drvport2ioq(ix), vlenp);
}
ErlDrvSizeT driver_sizeq(ErlDrvPort ix)
{
- ErlIOQueue* q = drvport2ioq(ix);
+ ErlPortIOQueue *q = drvport2ioq(ix);
if (q == NULL)
- return (size_t) -1;
- return q->size;
+ return (ErlDrvSizeT) -1;
+ return erts_ioq_size(q);
}
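
The driver queue entry points above now forward to the shared erts_ioq_* implementation while keeping the documented erl_driver semantics. Below is a minimal sketch, not part of this patch, of how a driver typically drains its port queue through this public API; the drv_data_t struct, its fd field and flush_queue() are hypothetical names, and SysIOVec is assumed to be layout-compatible with struct iovec on POSIX systems.

#include <sys/uio.h>        /* writev(), assuming a POSIX system */
#include "erl_driver.h"

typedef struct { ErlDrvPort port; int fd; } drv_data_t;   /* hypothetical */

static void flush_queue(drv_data_t *d)
{
    int vlen;
    SysIOVec *iov = driver_peekq(d->port, &vlen);   /* NULL when the queue is empty */

    if (iov != NULL && vlen > 0) {
        /* Assumption: SysIOVec has the same layout as struct iovec here. */
        ssize_t written = writev(d->fd, (struct iovec *) iov, vlen);
        if (written > 0)
            driver_deq(d->port, (ErlDrvSizeT) written);  /* drop the sent bytes */
    }

    if (driver_sizeq(d->port) == 0)
        set_busy_port(d->port, 0);                  /* queue fully drained */
}
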
@@ -8260,22 +7647,16 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->flags = de->driver_flags;
drv->handle = handle;
#ifdef ERTS_SMP
- if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING)
- drv->lock = NULL;
- else {
- drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
- sizeof(erts_mtx_t));
- erts_mtx_init_x(drv->lock,
- "driver_lock",
-#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- erts_atom_put((byte *) drv->name,
- sys_strlen(drv->name),
- ERTS_ATOM_ENC_LATIN1,
- 1)
-#else
- NIL
-#endif
- );
+ if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) {
+ drv->lock = NULL;
+ } else {
+ Eterm driver_id = erts_atom_put((byte *) drv->name,
+ sys_strlen(drv->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+
+ drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, sizeof(erts_mtx_t));
+
+ erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
#endif
drv->entry = de;
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index 8ab6c713d6..7987cb2eb5 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -120,7 +120,8 @@ void init_module_table(void)
}
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i));
+ erts_smp_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
erts_smp_atomic_init_nob(&tot_module_bytes, 0);
}
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index cdf9cb58b9..44613c7d85 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1174,9 +1174,9 @@ bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action
%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action
-i_bs_get_binary_imm2 f x I I I d
-i_bs_get_binary2 f x I s I d
-i_bs_get_binary_all2 f x I I d
+i_bs_get_binary_imm2 f x I I I x
+i_bs_get_binary2 f x I s I x
+i_bs_get_binary_all2 f x I I x
i_bs_get_binary_all_reuse x f I
# Fetching float from binaries.
@@ -1186,7 +1186,7 @@ bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
%macro: i_bs_get_float2 BsGetFloat2 -fail_action
-i_bs_get_float2 f x I s I d
+i_bs_get_float2 f x I s I x
# Miscellanous
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 7f60710124..bf3267cff1 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -145,7 +145,8 @@ void init_register_table(void)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 30b26a7296..527c9efeca 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -155,7 +155,8 @@ int safe_hash_table_sz(SafeHash *h)
** Init a pre allocated or static hash structure
** and allocate buckets. NOT SAFE
*/
-SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
+SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_flags_t flags,
+ int size, SafeHashFunctions fun)
{
int i, bytes;
@@ -170,7 +171,8 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size,
erts_smp_atomic_init_nob(&h->is_rehashing, 0);
erts_smp_atomic_init_nob(&h->nitems, 0);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
- erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
+ erts_smp_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL,
+ flags);
}
return h;
}
@@ -273,5 +275,22 @@ void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_ar
}
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int enable) {
+ int i;
+
+ for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) {
+ erts_smp_mtx_t *lock = &h->lock_vec[i].mtx;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL,
+ ERTS_LOCK_TYPE_MUTEX | flags);
+ } else {
+ erts_lcnt_uninstall(&lock->lcnt);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h
index 285103cb17..dde48a6de8 100644
--- a/erts/emulator/beam/safe_hash.h
+++ b/erts/emulator/beam/safe_hash.h
@@ -28,6 +28,7 @@
#include "sys.h"
#include "erl_alloc.h"
+#include "erl_lock_flags.h"
typedef unsigned long SafeHashValue;
@@ -85,7 +86,7 @@ typedef struct
/* A: Lockless atomics */
} SafeHash;
-SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, int, SafeHashFunctions);
+SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, erts_lock_flags_t, int, SafeHashFunctions);
void safe_hash_get_info(SafeHashInfo*, SafeHash*);
int safe_hash_table_sz(SafeHash *);
@@ -96,5 +97,9 @@ void* safe_hash_erase(SafeHash*, void*);
void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash*, erts_lock_flags_t, int);
+#endif
+
#endif /* __SAFE_HASH_H__ */
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index d752ea4330..6662685f54 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -784,10 +784,6 @@ Preload* sys_preloaded(void);
unsigned char* sys_preload_begin(Preload*);
void sys_preload_end(Preload*);
int sys_get_key(int);
-void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff);
-void wall_clock_elapsed_time_both(UWord *ms_total,
- UWord *ms_diff);
void get_time(int *hour, int *minute, int *second);
void get_date(int *year, int *month, int *day);
void get_localtime(int *year, int *month, int *day,
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 457cada745..d7116bd2c3 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -42,6 +42,7 @@
#include "dist.h"
#include "erl_printf.h"
#include "erl_threads.h"
+#include "erl_lock_count.h"
#include "erl_smp.h"
#include "erl_time.h"
#include "erl_thr_progress.h"
@@ -51,6 +52,7 @@
#include "erl_ptab.h"
#include "erl_check_io.h"
#include "erl_bif_unique.h"
+#include "erl_io_queue.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
#ifdef HIPE
@@ -3634,13 +3636,78 @@ intlist_to_buf(Eterm list, char *buf, Sint len)
return -2; /* not enough space */
}
-/* Fill buf with the contents of the unicode list.
- * Return the number of bytes in the buffer,
- * or -1 for type error,
- * or -2 for not enough buffer space (buffer contains truncated result).
+/** @brief Fill buf with the UTF-8 contents of the unicode list
+ * @param len Max number of characters to write.
+ * @param written NULL, or where to return the number of bytes written.
+ * @return 0 ok,
+ * -1 type error,
+ * -2 list too long; only \c len characters were written
+ */
+int
+erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+ Sint val;
+ int res;
+
+ while (1) {
+ if (is_nil(list)) {
+ res = 0;
+ break;
+ }
+ if (is_not_list(list)) {
+ res = -1;
+ break;
+ }
+ listptr = list_val(list);
+
+ if (len-- <= 0) {
+ res = -2;
+ break;
+ }
+
+ if (is_not_small(CAR(listptr))) {
+ res = -1;
+ break;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ buf[sz] = val;
+ sz++;
+ } else if (val < 0x800) {
+ buf[sz+0] = 0xC0 | (val >> 6);
+ buf[sz+1] = 0x80 | (val & 0x3F);
+ sz += 2;
+ } else if (val < 0x10000UL) {
+ if (0xD800 <= val && val <= 0xDFFF) {
+ res = -1;
+ break;
+ }
+ buf[sz+0] = 0xE0 | (val >> 12);
+ buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+2] = 0x80 | (val & 0x3F);
+ sz += 3;
+ } else if (val < 0x110000) {
+ buf[sz+0] = 0xF0 | (val >> 18);
+ buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
+ buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+3] = 0x80 | (val & 0x3F);
+ sz += 4;
+ } else {
+ res = -1;
+ break;
+ }
+ list = CDR(listptr);
+ }
+
+ if (written)
+ *written = sz;
+ return res;
+}
+
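
erts_unicode_list_to_buf() above and erts_unicode_list_to_buf_len() below share the same classification of code points into 1-4 UTF-8 bytes. For illustration only, a standalone sketch of that size rule (the helper name is hypothetical and not part of the source):

static int utf8_encoded_size(long cp)      /* hypothetical helper */
{
    if (cp < 0)
        return -1;                         /* not a code point */
    if (cp < 0x80)
        return 1;                          /* ASCII */
    if (cp < 0x800)
        return 2;
    if (cp < 0x10000) {
        if (cp >= 0xD800 && cp <= 0xDFFF)
            return -1;                     /* surrogates are rejected */
        return 3;
    }
    if (cp < 0x110000)
        return 4;
    return -1;                             /* above the Unicode range */
}
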
Sint
-erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len)
+erts_unicode_list_to_buf_len(Eterm list)
{
Eterm* listptr;
Sint sz = 0;
@@ -3653,7 +3720,7 @@ erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len)
}
listptr = list_val(list);
- while (len-- > 0) {
+ while (1) {
Sint val;
if (is_not_small(CAR(listptr))) {
@@ -3661,25 +3728,15 @@ erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len)
}
val = signed_val(CAR(listptr));
if (0 <= val && val < 0x80) {
- buf[sz] = val;
sz++;
} else if (val < 0x800) {
- buf[sz+0] = 0xC0 | (val >> 6);
- buf[sz+1] = 0x80 | (val & 0x3F);
sz += 2;
} else if (val < 0x10000UL) {
if (0xD800 <= val && val <= 0xDFFF) {
return -1;
}
- buf[sz+0] = 0xE0 | (val >> 12);
- buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
- buf[sz+2] = 0x80 | (val & 0x3F);
sz += 3;
} else if (val < 0x110000) {
- buf[sz+0] = 0xF0 | (val >> 18);
- buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
- buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
- buf[sz+3] = 0x80 | (val & 0x3F);
sz += 4;
} else {
return -1;
@@ -3693,7 +3750,6 @@ erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len)
}
listptr = list_val(list);
}
- return -2; /* not enough space */
}
/*
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 1538191d67..3a7b3bb50c 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -742,7 +742,8 @@ file_init(void)
efile_init();
#ifdef USE_VM_PROBES
- erts_mtx_init(&dt_driver_mutex, "efile_drv dtrace mutex");
+ erts_mtx_init(&dt_driver_mutex, "efile_drv dtrace mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
pthread_key_create(&dt_driver_key, NULL);
#endif /* USE_VM_PROBES */
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 13ee935e45..7b1f4a0e9c 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -1249,6 +1249,8 @@ static int tcp_shutdown_error(tcp_descriptor* desc, int err);
static int tcp_inet_output(tcp_descriptor* desc, HANDLE event);
static int tcp_inet_input(tcp_descriptor* desc, HANDLE event);
+static void tcp_desc_close(tcp_descriptor*);
+
#ifdef HAVE_UDP
typedef struct {
inet_descriptor inet; /* common data structure (DON'T MOVE) */
@@ -4334,6 +4336,12 @@ static void desc_close(inet_descriptor* desc)
desc->event = INVALID_EVENT; /* closed by stop_select callback */
desc->s = INVALID_SOCKET;
desc->event_mask = 0;
+
+ /* Mark the descriptor as disconnected in case the socket is left
+ * lingering because of the {exit_on_close, false} option to gen_tcp
+ * socket creation. The next write to the socket should then produce
+ * {error, enotconn} and send a {tcp_error,#Port<>,econnreset} message. */
+ desc->state &= ~INET_STATE_CONNECTED;
}
}
@@ -9242,16 +9250,31 @@ static void tcp_inet_stop(ErlDrvData e)
tcp_descriptor* desc = (tcp_descriptor*)e;
DEBUGF(("tcp_inet_stop(%ld) {s=%d\r\n",
(long)desc->inet.port, desc->inet.s));
+
tcp_close_check(desc);
- /* free input buffer & output buffer */
- if (desc->i_buf != NULL)
- release_buffer(desc->i_buf);
- desc->i_buf = NULL; /* net_mess2 may call this function recursively when
- faulty messages arrive on dist ports*/
+ tcp_clear_input(desc);
+
DEBUGF(("tcp_inet_stop(%ld) }\r\n", (long)desc->inet.port));
inet_stop(INETP(desc));
}
+/* Closes a tcp descriptor without leaving things hanging; the VM keeps trying
+ * to flush IO queues as long as they contain anything, even after the port
+ * has been closed from the Erlang side. That is desired behavior (think of
+ * escripts writing to files), but it can never finish if the underlying fd
+ * has already been set to INVALID_SOCKET through desc_close.
+ *
+ * This function should be used in place of desc_close/erl_inet_close in all
+ * TCP-related operations. Note that it only closes the desc cleanly; the
+ * descriptor itself is freed through tcp_inet_stop later on. */
+static void tcp_desc_close(tcp_descriptor* desc)
+{
+ tcp_clear_input(desc);
+ tcp_clear_output(desc);
+
+ erl_inet_close(INETP(desc));
+}
+
/* TCP requests from Erlang */
static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
char* buf, ErlDrvSizeT len,
@@ -9496,7 +9519,7 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
case INET_REQ_CLOSE:
DEBUGF(("tcp_inet_ctl(%ld): CLOSE\r\n", (long)desc->inet.port));
tcp_close_check(desc);
- erl_inet_close(INETP(desc));
+ tcp_desc_close(desc);
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
@@ -9620,7 +9643,7 @@ static void tcp_inet_timeout(ErlDrvData e)
set_busy_port(desc->inet.port, 0);
inet_reply_error_am(INETP(desc), am_timeout);
if (desc->send_timeout_close) {
- erl_inet_close(INETP(desc));
+ tcp_desc_close(desc);
}
}
else {
@@ -9634,7 +9657,7 @@ static void tcp_inet_timeout(ErlDrvData e)
else if ((state & INET_STATE_CONNECTING) == INET_STATE_CONNECTING) {
/* assume connect timeout */
/* close the socket since it's not usable (see man pages) */
- erl_inet_close(INETP(desc));
+ tcp_desc_close(desc);
async_error_am(INETP(desc), am_timeout);
}
else if ((state & INET_STATE_ACCEPTING) == INET_STATE_ACCEPTING) {
@@ -9797,8 +9820,7 @@ static int tcp_recv_closed(tcp_descriptor* desc)
/* passive mode do not terminate port ! */
tcp_clear_input(desc);
if (desc->inet.exitf) {
- tcp_clear_output(desc);
- desc_close(INETP(desc));
+ tcp_desc_close(desc);
} else {
desc_close_read(INETP(desc));
}
@@ -9841,7 +9863,7 @@ static int tcp_recv_error(tcp_descriptor* desc, int err)
driver_cancel_timer(desc->inet.port);
tcp_clear_input(desc);
if (desc->inet.exitf) {
- desc_close(INETP(desc));
+ tcp_desc_close(desc);
} else {
desc_close_read(INETP(desc));
}
@@ -10490,9 +10512,6 @@ static int tcp_send_or_shutdown_error(tcp_descriptor* desc, int err)
set_busy_port(desc->inet.port, 0);
}
- tcp_clear_output(desc);
- tcp_clear_input(desc);
-
/*
* We used to handle "expected errors" differently from unexpected ones.
* Now we handle all errors in the same way (unless the show_econnreset
@@ -10513,10 +10532,10 @@ static int tcp_send_or_shutdown_error(tcp_descriptor* desc, int err)
if (desc->inet.exitf)
driver_exit(desc->inet.port, 0);
else
- desc_close(INETP(desc));
+ tcp_desc_close(desc);
} else {
tcp_close_check(desc);
- erl_inet_close(INETP(desc));
+ tcp_desc_close(desc);
if (desc->inet.caller) {
if (show_econnreset)
diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c
deleted file mode 100644
index e342e414b5..0000000000
--- a/erts/emulator/drivers/common/zlib_drv.c
+++ /dev/null
@@ -1,792 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2017. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * ZLib interface for erlang
- *
- */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-#include <stdio.h>
-#include <zlib.h>
-#include <errno.h>
-#include <string.h>
-
-#include "erl_driver.h"
-
-
-#define DEFLATE_INIT 1
-#define DEFLATE_INIT2 2
-#define DEFLATE_SETDICT 3
-#define DEFLATE_RESET 4
-#define DEFLATE_END 5
-#define DEFLATE_PARAMS 6
-#define DEFLATE 7
-
-#define INFLATE_INIT 8
-#define INFLATE_INIT2 9
-#define INFLATE_SETDICT 10
-#define INFLATE_GETDICT 11
-#define INFLATE_SYNC 12
-#define INFLATE_RESET 13
-#define INFLATE_END 14
-#define INFLATE 15
-
-#define CRC32_0 16
-#define CRC32_1 17
-#define CRC32_2 18
-
-#define SET_BUFSZ 19
-#define GET_BUFSZ 20
-#define GET_QSIZE 21
-
-#define ADLER32_1 22
-#define ADLER32_2 23
-
-#define CRC32_COMBINE 24
-#define ADLER32_COMBINE 25
-
-#define INFLATE_CHUNK 26
-
-
-#define DEFAULT_BUFSZ 4000
-
-/* According to zlib documentation, it can never exceed this */
-#define INFL_DICT_SZ 32768
-
-/* This flag is used in the same places, where zlib return codes
- * (Z_OK, Z_STREAM_END, Z_NEED_DICT) are. So, we need to set it to
- * relatively large value to avoid possible value clashes in future.
- * */
-#define INFLATE_HAS_MORE 100
-
-static int zlib_init(void);
-static ErlDrvData zlib_start(ErlDrvPort port, char* buf);
-static void zlib_stop(ErlDrvData e);
-static void zlib_flush(ErlDrvData e);
-static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf,
- ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen);
-static void zlib_outputv(ErlDrvData drv_data, ErlIOVec *ev);
-
-ErlDrvEntry zlib_driver_entry = {
- zlib_init,
- zlib_start,
- zlib_stop,
- NULL, /* output */
- NULL, /* ready_input */
- NULL, /* ready_output */
- "zlib_drv",
- NULL, /* finish */
- NULL, /* handle */
- zlib_ctl,
- NULL, /* timeout */
- zlib_outputv,
- NULL, /* read_async */
- zlib_flush,
- NULL, /* call */
- NULL, /* event */
- ERL_DRV_EXTENDED_MARKER,
- ERL_DRV_EXTENDED_MAJOR_VERSION,
- ERL_DRV_EXTENDED_MINOR_VERSION,
- ERL_DRV_FLAG_USE_PORT_LOCKING,
- NULL, /* handle2 */
- NULL, /* process_exit */
-};
-
-typedef enum {
- ST_NONE = 0,
- ST_DEFLATE = 1,
- ST_INFLATE = 2
-} ZLibState;
-
-
-typedef struct {
- z_stream s;
- ZLibState state;
- ErlDrvBinary* bin;
- int binsz;
- int binsz_need;
- uLong crc;
- int inflate_eos_seen;
- int want_crc; /* 1 if crc is calculated on clear text */
- ErlDrvPort port; /* the associcated port */
-} ZLibData;
-
-static int zlib_inflate(ZLibData* d, int flush);
-static int zlib_deflate(ZLibData* d, int flush);
-
-#if defined(__WIN32__)
-static int i32(char* buf)
-#else
-static __inline__ int i32(char* buf)
-#endif
-{
- return (int) (
- (((int)((unsigned char*)buf)[0]) << 24) |
- (((int)((unsigned char*)buf)[1]) << 16) |
- (((int)((unsigned char*)buf)[2]) << 8) |
- (((int)((unsigned char*)buf)[3]) << 0));
-}
-
-static char* zlib_reason(int code, int* err)
-{
- switch(code) {
- case Z_OK:
- *err = 0;
- return "ok";
- case Z_STREAM_END:
- *err = 0;
- return "stream_end";
- case Z_ERRNO:
- *err = 1;
- return erl_errno_id(errno);
- case Z_STREAM_ERROR:
- *err = 1;
- return "stream_error";
- case Z_DATA_ERROR:
- *err = 1;
- return "data_error";
- case Z_MEM_ERROR:
- *err = 1;
- return "mem_error";
- case Z_BUF_ERROR:
- *err = 1;
- return "buf_error";
- case Z_VERSION_ERROR:
- *err = 1;
- return "version_error";
- default:
- *err = 1;
- return "unknown_error";
- }
-}
-
-
-static ErlDrvSSizeT zlib_return(int code, char** rbuf, ErlDrvSizeT rlen)
-{
- int msg_code = 0; /* 0=ok, 1=error */
- char* dst = *rbuf;
- char* src;
- ErlDrvSizeT len = 0;
-
- src = zlib_reason(code, &msg_code);
- *dst++ = msg_code;
- rlen--;
- len = 1;
-
- while((rlen > 0) && *src) {
- *dst++ = *src++;
- rlen--;
- len++;
- }
- return len;
-}
-
-static ErlDrvSSizeT zlib_value2(int msg_code, int value,
- char** rbuf, ErlDrvSizeT rlen)
-{
- char* dst = *rbuf;
-
- if (rlen < 5) {
- return -1;
- }
- *dst++ = msg_code;
- *dst++ = (value >> 24) & 0xff;
- *dst++ = (value >> 16) & 0xff;
- *dst++ = (value >> 8) & 0xff;
- *dst++ = value & 0xff;
- return 5;
-}
-
-static ErlDrvSSizeT zlib_value(int value, char** rbuf, ErlDrvSizeT rlen)
-{
- return zlib_value2(2, value, rbuf, rlen);
-}
-
-static int zlib_output_init(ZLibData* d)
-{
- if (d->bin != NULL)
- driver_free_binary(d->bin);
- if ((d->bin = driver_alloc_binary(d->binsz_need)) == NULL)
- return -1;
- d->binsz = d->binsz_need;
- d->s.next_out = (unsigned char*)d->bin->orig_bytes;
- d->s.avail_out = d->binsz;
- return 0;
-}
-
-/*
- * Send compressed or uncompressed data
- * and restart output procesing
- */
-static int zlib_output(ZLibData* d)
-{
- if (d->bin != NULL) {
- int len = d->binsz - d->s.avail_out;
- if (len > 0) {
- if (driver_output_binary(d->port, NULL, 0, d->bin, 0, len) < 0)
- return -1;
- }
- driver_free_binary(d->bin);
- d->bin = NULL;
- d->binsz = 0;
- }
- return zlib_output_init(d);
-}
-
-static int zlib_inflate_get_dictionary(ZLibData* d)
-{
-#ifdef HAVE_ZLIB_INFLATEGETDICTIONARY
- ErlDrvBinary* dbin = driver_alloc_binary(INFL_DICT_SZ);
- uInt dlen = 0;
- int res = inflateGetDictionary(&d->s, (unsigned char*)dbin->orig_bytes, &dlen);
- if ((res == Z_OK) && (driver_output_binary(d->port, NULL, 0, dbin, 0, dlen) < 0)) {
- res = Z_ERRNO;
- }
- driver_free_binary(dbin);
- return res;
-#else
- abort(); /* never called, just to silence 'unresolved symbol'
- for non-optimizing compiler */
-#endif
-}
-
-static int zlib_inflate(ZLibData* d, int flush)
-{
- int res = Z_OK;
-
- if ((d->bin == NULL) && (zlib_output_init(d) < 0)) {
- errno = ENOMEM;
- return Z_ERRNO;
- }
-
- while ((driver_sizeq(d->port) > 0) && (res != Z_STREAM_END)) {
- int vlen;
- SysIOVec* iov = driver_peekq(d->port, &vlen);
- int len;
- int possibly_more_output = 0;
-
- d->s.next_in = iov[0].iov_base;
- d->s.avail_in = iov[0].iov_len;
- while((possibly_more_output || (d->s.avail_in > 0)) && (res != Z_STREAM_END)) {
- res = inflate(&d->s, Z_NO_FLUSH);
- if (res == Z_NEED_DICT) {
- /* Essential to eat the header bytes that zlib has looked at */
- len = iov[0].iov_len - d->s.avail_in;
- driver_deq(d->port, len);
- return res;
- }
- if (res == Z_BUF_ERROR) {
- /* Was possible more output, but actually not */
- res = Z_OK;
- }
- else if (res < 0) {
- return res;
- }
- if (d->s.avail_out != 0) {
- possibly_more_output = 0;
- } else {
- if (d->want_crc)
- d->crc = crc32(d->crc, (unsigned char*)d->bin->orig_bytes,
- d->binsz - d->s.avail_out);
- zlib_output(d);
- possibly_more_output = 1;
- }
- }
- len = iov[0].iov_len - d->s.avail_in;
- driver_deq(d->port, len);
- }
-
- if (d->want_crc) {
- d->crc = crc32(d->crc, (unsigned char*) d->bin->orig_bytes,
- d->binsz - d->s.avail_out);
- }
- zlib_output(d);
- if (res == Z_STREAM_END) {
- d->inflate_eos_seen = 1;
- }
- return res;
-}
-
-static int zlib_inflate_chunk(ZLibData* d)
-{
- int res = Z_OK;
-
- if ((d->bin == NULL) && (zlib_output_init(d) < 0)) {
- errno = ENOMEM;
- return Z_ERRNO;
- }
-
- while ((driver_sizeq(d->port) > 0) && (d->s.avail_out > 0) &&
- (res != Z_STREAM_END)) {
- int vlen;
- SysIOVec* iov = driver_peekq(d->port, &vlen);
- int len;
-
- d->s.next_in = iov[0].iov_base;
- d->s.avail_in = iov[0].iov_len;
- while((d->s.avail_in > 0) && (d->s.avail_out > 0) && (res != Z_STREAM_END)) {
- res = inflate(&d->s, Z_NO_FLUSH);
- if (res == Z_NEED_DICT) {
- /* Essential to eat the header bytes that zlib has looked at */
- len = iov[0].iov_len - d->s.avail_in;
- driver_deq(d->port, len);
- return res;
- }
- if (res == Z_BUF_ERROR) {
- /* Was possible more output, but actually not */
- res = Z_OK;
- }
- else if (res < 0) {
- return res;
- }
- }
- len = iov[0].iov_len - d->s.avail_in;
- driver_deq(d->port, len);
- }
-
- /* We are here because all input was consumed or EOS reached or output
- * buffer is full */
- if (d->want_crc) {
- d->crc = crc32(d->crc, (unsigned char*) d->bin->orig_bytes,
- d->binsz - d->s.avail_out);
- }
- zlib_output(d);
- if ((res == Z_OK) && (d->s.avail_in > 0))
- res = INFLATE_HAS_MORE;
- else if (res == Z_STREAM_END) {
- d->inflate_eos_seen = 1;
- }
- return res;
-}
-
-static int zlib_deflate(ZLibData* d, int flush)
-{
- int res = Z_OK;
-
- if ((d->bin == NULL) && (zlib_output_init(d) < 0)) {
- errno = ENOMEM;
- return Z_ERRNO;
- }
-
- while ((driver_sizeq(d->port) > 0) && (res != Z_STREAM_END)) {
- int vlen;
- SysIOVec* iov = driver_peekq(d->port, &vlen);
- int len;
-
- d->s.next_in = iov[0].iov_base;
- d->s.avail_in = iov[0].iov_len;
-
- while((d->s.avail_in > 0) && (res != Z_STREAM_END)) {
- if ((res = deflate(&d->s, Z_NO_FLUSH)) < 0) {
- return res;
- }
- if (d->s.avail_out == 0) {
- zlib_output(d);
- }
- }
- len = iov[0].iov_len - d->s.avail_in;
- if (d->want_crc) {
- d->crc = crc32(d->crc, iov[0].iov_base, len);
- }
- driver_deq(d->port, len);
- }
-
- if (flush != Z_NO_FLUSH) {
- if ((res = deflate(&d->s, flush)) < 0) {
- return res;
- }
- if (flush == Z_FINISH) {
- while (d->s.avail_out < d->binsz) {
- zlib_output(d);
- if (res == Z_STREAM_END) {
- break;
- }
- if ((res = deflate(&d->s, flush)) < 0) {
- return res;
- }
- }
- } else {
- while (d->s.avail_out == 0) {
- zlib_output(d);
- if ((res = deflate(&d->s, flush)) < 0) {
- return res;
- }
- }
- if (d->s.avail_out < d->binsz) {
- zlib_output(d);
- }
- }
- }
- return res;
-}
-
-
-
-static void* zlib_alloc(void* data, unsigned int items, unsigned int size)
-{
- return (void*) driver_alloc(items*size);
-}
-
-static void zlib_free(void* data, void* addr)
-{
- driver_free(addr);
-}
-
-#if defined(__APPLE__) && defined(__MACH__) && defined(HAVE_ZLIB_INFLATEGETDICTIONARY)
-
-/* Work around broken build system with runtime version test */
-static int have_inflateGetDictionary;
-
-static int zlib_init()
-{
- unsigned int v[4] = {0, 0, 0, 0};
- unsigned hexver;
-
- sscanf(zlibVersion(), "%u.%u.%u.%u", &v[0], &v[1], &v[2], &v[3]);
-
- hexver = (v[0] << (8*3)) | (v[1] << (8*2)) | (v[2] << (8)) | v[3];
-
- have_inflateGetDictionary = (hexver >= 0x1020701); /* 1.2.7.1 */
-
- return 0;
-}
-#else /* trust configure got it right */
-# ifdef HAVE_ZLIB_INFLATEGETDICTIONARY
-# define have_inflateGetDictionary 1
-# else
-# define have_inflateGetDictionary 0
-# endif
-static int zlib_init()
-{
- return 0;
-}
-#endif
-
-static ErlDrvData zlib_start(ErlDrvPort port, char* buf)
-{
- ZLibData* d;
-
- if ((d = (ZLibData*) driver_alloc(sizeof(ZLibData))) == NULL)
- return ERL_DRV_ERROR_GENERAL;
-
- memset(&d->s, 0, sizeof(z_stream));
-
- d->s.zalloc = zlib_alloc;
- d->s.zfree = zlib_free;
- d->s.opaque = d;
- d->s.data_type = Z_BINARY;
-
- d->port = port;
- d->state = ST_NONE;
- d->bin = NULL;
- d->binsz = 0;
- d->binsz_need = DEFAULT_BUFSZ;
- d->crc = crc32(0L, Z_NULL, 0);
- d->inflate_eos_seen = 0;
- d->want_crc = 0;
- return (ErlDrvData)d;
-}
-
-
-static void zlib_stop(ErlDrvData e)
-{
- ZLibData* d = (ZLibData*)e;
-
- if (d->state == ST_DEFLATE)
- deflateEnd(&d->s);
- else if (d->state == ST_INFLATE)
- inflateEnd(&d->s);
-
- if (d->bin != NULL)
- driver_free_binary(d->bin);
-
- driver_free(d);
-}
-
-static void zlib_flush(ErlDrvData drv_data)
-{
- ZLibData* d = (ZLibData*) drv_data;
-
- driver_deq(d->port, driver_sizeq(d->port));
-}
-
-static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf,
- ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)
-{
- ZLibData* d = (ZLibData*)drv_data;
- int res;
-
- switch(command) {
- case DEFLATE_INIT:
- if (len != 4) goto badarg;
- if (d->state != ST_NONE) goto badarg;
- res = deflateInit(&d->s, i32(buf));
- if (res == Z_OK) {
- d->state = ST_DEFLATE;
- d->want_crc = 0;
- d->crc = crc32(0L, Z_NULL, 0);
- }
- return zlib_return(res, rbuf, rlen);
-
- case DEFLATE_INIT2: {
- int wbits;
-
- if (len != 20) goto badarg;
- if (d->state != ST_NONE) goto badarg;
- wbits = i32(buf+8);
- res = deflateInit2(&d->s, i32(buf), i32(buf+4), wbits,
- i32(buf+12), i32(buf+16));
- if (res == Z_OK) {
- d->state = ST_DEFLATE;
- d->want_crc = (wbits < 0);
- d->crc = crc32(0L, Z_NULL, 0);
- }
- return zlib_return(res, rbuf, rlen);
- }
-
- case DEFLATE_SETDICT:
- if (d->state != ST_DEFLATE) goto badarg;
- res = deflateSetDictionary(&d->s, (unsigned char*)buf, len);
- if (res == Z_OK) {
- return zlib_value(d->s.adler, rbuf, rlen);
- } else {
- return zlib_return(res, rbuf, rlen);
- }
-
- case DEFLATE_RESET:
- if (len != 0) goto badarg;
- if (d->state != ST_DEFLATE) goto badarg;
- driver_deq(d->port, driver_sizeq(d->port));
- res = deflateReset(&d->s);
- return zlib_return(res, rbuf, rlen);
-
- case DEFLATE_END:
- if (len != 0) goto badarg;
- if (d->state != ST_DEFLATE) goto badarg;
- driver_deq(d->port, driver_sizeq(d->port));
- res = deflateEnd(&d->s);
- d->state = ST_NONE;
- return zlib_return(res, rbuf, rlen);
-
- case DEFLATE_PARAMS:
- if (len != 8) goto badarg;
- if (d->state != ST_DEFLATE) goto badarg;
- res = deflateParams(&d->s, i32(buf), i32(buf+4));
- return zlib_return(res, rbuf, rlen);
-
- case DEFLATE:
- if (d->state != ST_DEFLATE) goto badarg;
- if (len != 4) goto badarg;
- res = zlib_deflate(d, i32(buf));
- return zlib_return(res, rbuf, rlen);
-
- case INFLATE_INIT:
- if (len != 0) goto badarg;
- if (d->state != ST_NONE) goto badarg;
- res = inflateInit(&d->s);
- if (res == Z_OK) {
- d->state = ST_INFLATE;
- d->inflate_eos_seen = 0;
- d->want_crc = 0;
- d->crc = crc32(0L, Z_NULL, 0);
- }
- return zlib_return(res, rbuf, rlen);
-
- case INFLATE_INIT2: {
- int wbits;
-
- if (len != 4) goto badarg;
- if (d->state != ST_NONE) goto badarg;
- wbits = i32(buf);
- res = inflateInit2(&d->s, wbits);
- if (res == Z_OK) {
- d->state = ST_INFLATE;
- d->inflate_eos_seen = 0;
- d->want_crc = (wbits < 0);
- d->crc = crc32(0L, Z_NULL, 0);
- }
- return zlib_return(res, rbuf, rlen);
- }
-
- case INFLATE_SETDICT:
- if (d->state != ST_INFLATE) goto badarg;
- res = inflateSetDictionary(&d->s, (unsigned char*)buf, len);
- return zlib_return(res, rbuf, rlen);
-
- case INFLATE_GETDICT:
- if (have_inflateGetDictionary) {
- if (d->state != ST_INFLATE) goto badarg;
- res = zlib_inflate_get_dictionary(d);
- } else {
- errno = ENOTSUP;
- res = Z_ERRNO;
- }
- return zlib_return(res, rbuf, rlen);
-
- case INFLATE_SYNC:
- if (d->state != ST_INFLATE) goto badarg;
- if (len != 0) goto badarg;
- if (driver_sizeq(d->port) == 0) {
- res = Z_BUF_ERROR;
- } else {
- int vlen;
- SysIOVec* iov = driver_peekq(d->port, &vlen);
-
- d->s.next_in = iov[0].iov_base;
- d->s.avail_in = iov[0].iov_len;
- res = inflateSync(&d->s);
- }
- return zlib_return(res, rbuf, rlen);
-
- case INFLATE_RESET:
- if (d->state != ST_INFLATE) goto badarg;
- if (len != 0) goto badarg;
- driver_deq(d->port, driver_sizeq(d->port));
- res = inflateReset(&d->s);
- d->inflate_eos_seen = 0;
- return zlib_return(res, rbuf, rlen);
-
- case INFLATE_END:
- if (d->state != ST_INFLATE) goto badarg;
- if (len != 0) goto badarg;
- driver_deq(d->port, driver_sizeq(d->port));
- res = inflateEnd(&d->s);
- if (res == Z_OK && d->inflate_eos_seen == 0) {
- res = Z_DATA_ERROR;
- }
- d->state = ST_NONE;
- return zlib_return(res, rbuf, rlen);
-
- case INFLATE:
- if (d->state != ST_INFLATE) goto badarg;
- if (len != 4) goto badarg;
- res = zlib_inflate(d, i32(buf));
- if (res == Z_NEED_DICT) {
- return zlib_value2(3, d->s.adler, rbuf, rlen);
- } else {
- return zlib_return(res, rbuf, rlen);
- }
-
- case INFLATE_CHUNK:
- if (d->state != ST_INFLATE) goto badarg;
- if (len != 0) goto badarg;
- res = zlib_inflate_chunk(d);
- if (res == INFLATE_HAS_MORE) {
- return zlib_value2(4, 0, rbuf, rlen);
- } else if (res == Z_NEED_DICT) {
- return zlib_value2(3, d->s.adler, rbuf, rlen);
- } else {
- return zlib_return(res, rbuf, rlen);
- }
-
- case GET_QSIZE:
- return zlib_value(driver_sizeq(d->port), rbuf, rlen);
-
- case GET_BUFSZ:
- return zlib_value(d->binsz_need, rbuf, rlen);
-
- case SET_BUFSZ: {
- int need;
- if (len != 4) goto badarg;
- need = i32(buf);
- if ((need < 16) || (need > 0x00ffffff))
- goto badarg;
- if (d->binsz_need != need) {
- d->binsz_need = need;
- if (d->bin != NULL) {
- if (d->s.avail_out == d->binsz) {
- driver_free_binary(d->bin);
- d->bin = NULL;
- d->binsz = 0;
- }
- else
- zlib_output(d);
- }
- }
- return zlib_return(Z_OK, rbuf, rlen);
- }
-
- case CRC32_0:
- return zlib_value(d->crc, rbuf, rlen);
-
- case CRC32_1: {
- uLong crc = crc32(0L, Z_NULL, 0);
- crc = crc32(crc, (unsigned char*) buf, len);
- return zlib_value(crc, rbuf, rlen);
- }
-
- case CRC32_2: {
- uLong crc;
- if (len < 4) goto badarg;
- crc = (unsigned int) i32(buf);
- crc = crc32(crc, (unsigned char*) buf+4, len-4);
- return zlib_value(crc, rbuf, rlen);
- }
-
- case ADLER32_1: {
- uLong adler = adler32(0L, Z_NULL, 0);
- adler = adler32(adler, (unsigned char*) buf, len);
- return zlib_value(adler, rbuf, rlen);
- }
-
- case ADLER32_2: {
- uLong adler;
- if (len < 4) goto badarg;
- adler = (unsigned int) i32(buf);
- adler = adler32(adler, (unsigned char*) buf+4, len-4);
- return zlib_value(adler, rbuf, rlen);
- }
-
- case CRC32_COMBINE: {
- uLong crc, crc1, crc2, len2;
- if (len != 12) goto badarg;
- crc1 = (unsigned int) i32(buf);
- crc2 = (unsigned int) i32(buf+4);
- len2 = (unsigned int) i32(buf+8);
- crc = crc32_combine(crc1, crc2, len2);
- return zlib_value(crc, rbuf, rlen);
- }
-
- case ADLER32_COMBINE: {
- uLong adler, adler1, adler2, len2;
- if (len != 12) goto badarg;
- adler1 = (unsigned int) i32(buf);
- adler2 = (unsigned int) i32(buf+4);
- len2 = (unsigned int) i32(buf+8);
- adler = adler32_combine(adler1, adler2, len2);
- return zlib_value(adler, rbuf, rlen);
- }
- }
-
- badarg:
- errno = EINVAL;
- return zlib_return(Z_ERRNO, rbuf, rlen);
-}
-
-
-
-static void zlib_outputv(ErlDrvData drv_data, ErlIOVec *ev)
-{
- ZLibData* d = (ZLibData*) drv_data;
-
- driver_enqv(d->port, ev, 0);
-}
diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c
index e425b99f16..f3c1aa1c4a 100644
--- a/erts/emulator/drivers/unix/ttsl_drv.c
+++ b/erts/emulator/drivers/unix/ttsl_drv.c
@@ -108,16 +108,15 @@ static int lbuf_size = BUFSIZ;
static Uint32 *lbuf; /* The current line buffer */
static int llen; /* The current line length */
static int lpos; /* The current "cursor position" in the line buffer */
-
+ /* NOTE: this is not the same as the column position; a char may take
+ * no column to display, or it may take several columns.
+ */
/*
* Tags used in line buffer to show that these bytes represent special characters,
* Max unicode is 0x0010ffff, so we have lots of place for meta tags...
*/
#define CONTROL_TAG 0x10000000U /* Control character, value in first position */
#define ESCAPED_TAG 0x01000000U /* Escaped character, value in first position */
-#ifdef HAVE_WCWIDTH
-#define WIDE_TAG 0x02000000U /* Wide character, value in first position */
-#endif
#define TAG_MASK 0xFF000000U
#define MAXSIZE (1 << 16)
@@ -156,6 +155,8 @@ static int insert_buf(byte*,int);
static int write_buf(Uint32 *,int);
static int outc(int c);
static int move_cursor(int,int);
+static int cp_pos_to_col(int cp_pos);
+
/* Termcap functions. */
static int start_termcap(void);
@@ -991,24 +992,26 @@ static int del_chars(int n)
{
int i, l, r;
int pos;
+ int gcs; /* deleted grapheme characters */
update_cols();
/* Step forward or backwards over n logical characters. */
pos = step_over_chars(n);
-
+ DEBUGLOG(("del_chars: %d from %d %d %d\n", n, lpos, pos, llen));
if (pos > lpos) {
l = pos - lpos; /* Buffer characters to delete */
r = llen - lpos - l; /* Characters after deleted */
+ gcs = cp_pos_to_col(pos) - cp_pos_to_col(lpos);
/* Fix up buffer and buffer pointers. */
if (r > 0)
memmove(lbuf + lpos, lbuf + pos, r * sizeof(Uint32));
llen -= l;
/* Write out characters after, blank the tail and jump back to lpos. */
write_buf(lbuf + lpos, r);
- for (i = l ; i > 0; --i)
+ for (i = gcs ; i > 0; --i)
outc(' ');
- if (COL(llen+l) == 0 && xn)
+ if (xn && COL(cp_pos_to_col(llen)+gcs) == 0)
{
outc(' ');
move_left(1);
@@ -1018,7 +1021,7 @@ static int del_chars(int n)
else if (pos < lpos) {
l = lpos - pos; /* Buffer characters */
r = llen - lpos; /* Characters after deleted */
- move_cursor(lpos, lpos-l); /* Move back */
+ gcs = -move_cursor(lpos, lpos-l); /* Move back */
/* Fix up buffer and buffer pointers. */
if (r > 0)
memmove(lbuf + pos, lbuf + lpos, r * sizeof(Uint32));
@@ -1026,14 +1029,14 @@ static int del_chars(int n)
llen -= l;
/* Write out characters after, blank the tail and jump back to lpos. */
write_buf(lbuf + lpos, r);
- for (i = l ; i > 0; --i)
- outc(' ');
- if (COL(llen+l) == 0 && xn)
+ for (i = gcs ; i > 0; --i)
+ outc(' ');
+ if (xn && COL(cp_pos_to_col(llen)+gcs) == 0)
{
- outc(' ');
- move_left(1);
+ outc(' ');
+ move_left(1);
}
- move_cursor(llen + l, lpos);
+ move_cursor(llen + l, lpos);
}
return TRUE;
}
@@ -1047,22 +1050,12 @@ static int step_over_chars(int n)
end = lbuf + llen;
c = lbuf + lpos;
for ( ; n > 0 && c < end; --n) {
-#ifdef HAVE_WCWIDTH
- while (*c & WIDE_TAG) {
- c++;
- }
-#endif
c++;
while (c < end && (*c & TAG_MASK) && ((*c & ~TAG_MASK) == 0))
c++;
}
for ( ; n < 0 && c > beg; n++) {
--c;
-#ifdef HAVE_WCWIDTH
- while (c > beg + 1 && (c[-1] & WIDE_TAG)) {
- --c;
- }
-#endif
while (c > beg && (*c & TAG_MASK) && ((*c & ~TAG_MASK) == 0))
--c;
}
@@ -1088,15 +1081,6 @@ static int insert_buf(byte *s, int n)
++pos;
}
if ((utf8_mode && (ch >= 128 || isprint(ch))) || (ch <= 255 && isprint(ch))) {
-#ifdef HAVE_WCWIDTH
- int width;
- if ((width = wcwidth(ch)) > 1) {
- while (--width) {
- DEBUGLOG(("insert_buf: Wide(UTF-8):%d,%d",width,ch));
- lbuf[lpos++] = (WIDE_TAG | ((Uint32) ch));
- }
- }
-#endif
DEBUGLOG(("insert_buf: Printable(UTF-8):%d",ch));
lbuf[lpos++] = (Uint32) ch;
} else if (ch >= 128) { /* not utf8 mode */
@@ -1110,15 +1094,13 @@ static int insert_buf(byte *s, int n)
lbuf[lpos++] = (CONTROL_TAG | ((Uint32) ch));
ch = 0;
} while (lpos % 8);
- } else if (ch == '\e' || ch == '\n' || ch == '\r') {
+ } else if (ch == '\e') {
+ lbuf[lpos++] = (CONTROL_TAG | ((Uint32) ch));
+ } else if (ch == '\n' || ch == '\r') {
write_buf(lbuf + buffpos, lpos - buffpos);
- if (ch == '\e') {
- outc('\e');
- } else {
outc('\r');
if (ch == '\n')
outc('\n');
- }
if (llen > lpos) {
memcpy(lbuf, lbuf + lpos, llen - lpos);
}
@@ -1166,14 +1148,17 @@ static int write_buf(Uint32 *s, int n)
}
--n;
++s;
- }
- else if (*s == (CONTROL_TAG | ((Uint32) '\t'))) {
+ } else if (*s == (CONTROL_TAG | ((Uint32) '\t'))) {
outc(lastput = ' ');
--n; s++;
while (n > 0 && *s == CONTROL_TAG) {
outc(lastput = ' ');
--n; s++;
}
+ } else if (*s == (CONTROL_TAG | ((Uint32) '\e'))) {
+ outc('\e');
+ --n;
+ ++s;
} else if (*s & CONTROL_TAG) {
outc('^');
outc(lastput = ((byte) ((*s == 0177) ? '?' : *s | 0x40)));
@@ -1204,10 +1189,6 @@ static int write_buf(Uint32 *s, int n)
if (octbuff != octtmp) {
driver_free(octbuff);
}
-#ifdef HAVE_WCWIDTH
- } else if (*s & WIDE_TAG) {
- --n; s++;
-#endif
} else {
DEBUGLOG(("write_buf: Very unexpected character %d",(int) *s));
++n;
@@ -1216,7 +1197,7 @@ static int write_buf(Uint32 *s, int n)
}
/* Check landed in first column of new line and have 'xn' bug. */
n = s - lbuf;
- if (COL(n) == 0 && xn && n != 0) {
+ if (xn && n != 0 && COL(cp_pos_to_col(n)) == 0) {
if (n >= llen) {
outc(' ');
} else if (lastput == 0) { /* A multibyte UTF8 character */
@@ -1246,14 +1227,19 @@ static int outc(int c)
return 1;
}
-static int move_cursor(int from, int to)
+static int move_cursor(int from_pos, int to_pos)
{
+ int from_col, to_col;
int dc, dl;
-
update_cols();
- dc = COL(to) - COL(from);
- dl = LINE(to) - LINE(from);
+ from_col = cp_pos_to_col(from_pos);
+ to_col = cp_pos_to_col(to_pos);
+
+ dc = COL(to_col) - COL(from_col);
+ dl = LINE(to_col) - LINE(from_col);
+ DEBUGLOG(("move_cursor: from %d %d to %d %d => %d %d\n",
+ from_pos, from_col, to_pos, to_col, dl, dc));
if (dl > 0)
move_down(dl);
else if (dl < 0)
@@ -1262,7 +1248,29 @@ static int move_cursor(int from, int to)
move_right(dc);
else if (dc < 0)
move_left(-dc);
- return TRUE;
+ return to_col-from_col;
+}
+
+static int cp_pos_to_col(int cp_pos)
+{
+#ifdef HAVE_WCWIDTH
+ int i;
+ int col = 0;
+
+ for (i = 0; i < cp_pos; i++) {
+ int w = wcwidth(lbuf[i]);
+ if (w > 0) {
+ col += w;
+ }
+ }
+ return col;
+#else
+ /*
+ * We don't have any character width information. Assume that
+ * code points are one column wide.
+ */
+ return cp_pos;
+#endif
}
static int start_termcap(void)
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index 0225f17613..94bc563fda 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -1129,7 +1129,8 @@ struct hipe_ref {
static inline void hipe_mfa_info_table_init_lock(void)
{
- erts_smp_rwmtx_init(&hipe_mfa_info_table.lock, "hipe_mfait_lock");
+ erts_smp_rwmtx_init(&hipe_mfa_info_table.lock, "hipe_mfait_lock", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
static inline void hipe_mfa_info_table_rlock(void)
diff --git a/erts/emulator/nifs/common/zlib_nif.c b/erts/emulator/nifs/common/zlib_nif.c
new file mode 100644
index 0000000000..b7f3adaffe
--- /dev/null
+++ b/erts/emulator/nifs/common/zlib_nif.c
@@ -0,0 +1,1019 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#define STATIC_ERLANG_NIF 1
+
+#include <stdio.h>
+#include <zlib.h>
+
+#include "erl_nif.h"
+#include "config.h"
+#include "sys.h"
+
+#ifdef VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#define INFL_DICT_SZ (32768)
+
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+/* NIF interface declarations */
+static int load(ErlNifEnv *env, void** priv_data, ERL_NIF_TERM load_info);
+static int upgrade(ErlNifEnv *env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
+static void unload(ErlNifEnv *env, void* priv_data);
+
+static ErlNifResourceType *rtype_zlib;
+
+static ERL_NIF_TERM am_not_on_controlling_process;
+
+static ERL_NIF_TERM am_not_initialized;
+static ERL_NIF_TERM am_already_initialized;
+
+static ERL_NIF_TERM am_ok;
+static ERL_NIF_TERM am_error;
+
+static ERL_NIF_TERM am_continue;
+static ERL_NIF_TERM am_finished;
+
+static ERL_NIF_TERM am_not_supported;
+static ERL_NIF_TERM am_need_dictionary;
+
+static ERL_NIF_TERM am_empty;
+
+static ERL_NIF_TERM am_stream_end;
+static ERL_NIF_TERM am_stream_error;
+static ERL_NIF_TERM am_data_error;
+static ERL_NIF_TERM am_mem_error;
+static ERL_NIF_TERM am_buf_error;
+static ERL_NIF_TERM am_version_error;
+static ERL_NIF_TERM am_unknown_error;
+
+typedef enum {
+ ST_NONE = 0,
+ ST_DEFLATE = 1,
+ ST_INFLATE = 2,
+ ST_CLOSED = 3
+} zlib_state;
+
+typedef struct {
+ z_stream s;
+ zlib_state state;
+
+ /* These refer to the plaintext CRC, and are only needed for zlib:crc32/1
+ * which is deprecated. */
+ uLong input_crc;
+ uLong output_crc;
+ int want_input_crc;
+ int want_output_crc;
+
+ int is_raw_stream;
+
+ int eos_seen;
+
+ /* DEPRECATED */
+ int inflateChunk_buffer_size;
+
+ ErlNifPid controlling_process;
+
+ ErlNifIOQueue *input_queue;
+
+ ErlNifEnv *stash_env;
+ ERL_NIF_TERM stash_term;
+} zlib_data_t;
+
+/* The NIFs: */
+
+static ERL_NIF_TERM zlib_open(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_close(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_deflateInit(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_deflateInit2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_deflateSetDictionary(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_deflateReset(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_deflateEnd(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_deflateParams(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_deflate(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+
+static ERL_NIF_TERM zlib_inflateInit(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_inflateInit2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_inflateSetDictionary(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_inflateGetDictionary(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_inflateReset(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_inflateEnd(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_inflate(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+
+static ERL_NIF_TERM zlib_crc32(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+
+static ERL_NIF_TERM zlib_clearStash(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_setStash(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_getStash(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+
+static ERL_NIF_TERM zlib_getBufSize(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM zlib_setBufSize(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+
+static ERL_NIF_TERM zlib_enqueue_input(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]);
+
+static ErlNifFunc nif_funcs[] = {
+ /* deflate */
+ {"deflateInit_nif", 2, zlib_deflateInit},
+ {"deflateInit_nif", 6, zlib_deflateInit2},
+ {"deflateSetDictionary_nif", 2, zlib_deflateSetDictionary},
+ {"deflateReset_nif", 1, zlib_deflateReset},
+ {"deflateEnd_nif", 1, zlib_deflateEnd},
+ {"deflateParams_nif", 3, zlib_deflateParams},
+ {"deflate_nif", 4, zlib_deflate},
+
+ /* inflate */
+ {"inflateInit_nif", 1, zlib_inflateInit},
+ {"inflateInit_nif", 2, zlib_inflateInit2},
+ {"inflateSetDictionary_nif", 2, zlib_inflateSetDictionary},
+ {"inflateGetDictionary_nif", 1, zlib_inflateGetDictionary},
+ {"inflateReset_nif", 1, zlib_inflateReset},
+ {"inflateEnd_nif", 1, zlib_inflateEnd},
+ {"inflate_nif", 4, zlib_inflate},
+
+ /* running checksum */
+ {"crc32_nif", 1, zlib_crc32},
+
+ /* open & close */
+ {"close_nif", 1, zlib_close},
+ {"open_nif", 0, zlib_open},
+
+ /* The stash keeps a single term alive across calls; it is used in
+ * exception_on_need_dict/1 to retain the old error behavior, and to save
+ * data flushed through deflateParams/3. */
+ {"getStash_nif", 1, zlib_getStash},
+ {"clearStash_nif", 1, zlib_clearStash},
+ {"setStash_nif", 2, zlib_setStash},
+
+ /* DEPRECATED: buffer size for inflateChunk */
+ {"getBufSize_nif", 1, zlib_getBufSize},
+ {"setBufSize_nif", 2, zlib_setBufSize},
+
+ {"enqueue_nif", 2, zlib_enqueue_input},
+};
+
+ERL_NIF_INIT(zlib, nif_funcs, load, NULL, upgrade, unload)
+
+static void gc_zlib(ErlNifEnv *env, void* data);
+
+static int load(ErlNifEnv *env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ am_not_on_controlling_process =
+ enif_make_atom(env, "not_on_controlling_process");
+
+ am_not_initialized = enif_make_atom(env, "not_initialized");
+ am_already_initialized = enif_make_atom(env, "already_initialized");
+
+ am_ok = enif_make_atom(env, "ok");
+ am_error = enif_make_atom(env, "error");
+
+ am_continue = enif_make_atom(env, "continue");
+ am_finished = enif_make_atom(env, "finished");
+
+ am_not_supported = enif_make_atom(env, "not_supported");
+ am_need_dictionary = enif_make_atom(env, "need_dictionary");
+
+ am_empty = enif_make_atom(env, "empty");
+
+ am_stream_end = enif_make_atom(env, "stream_end");
+ am_stream_error = enif_make_atom(env, "stream_error");
+ am_data_error = enif_make_atom(env, "data_error");
+ am_mem_error = enif_make_atom(env, "mem_error");
+ am_buf_error = enif_make_atom(env, "buf_error");
+ am_version_error = enif_make_atom(env, "version_error");
+ am_unknown_error = enif_make_atom(env, "unknown_error");
+
+ rtype_zlib = enif_open_resource_type(env, NULL,
+ "gc_zlib", gc_zlib, ERL_NIF_RT_CREATE, NULL);
+ *priv_data = NULL;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv *env, void* priv_data)
+{
+
+}
+
+static int upgrade(ErlNifEnv *env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ if(*old_priv_data != NULL) {
+ return -1; /* Don't know how to do that */
+ }
+ if(*priv_data != NULL) {
+ return -1; /* Don't know how to do that */
+ }
+ if(load(env, priv_data, load_info)) {
+ return -1;
+ }
+ return 0;
+}
+
+static void* zlib_alloc(void* data, unsigned int items, unsigned int size)
+{
+ return (void*) enif_alloc(items * size);
+}
+
+static void zlib_free(void* data, void* addr)
+{
+ enif_free(addr);
+}
+
+static ERL_NIF_TERM zlib_return(ErlNifEnv *env, int code) {
+ ERL_NIF_TERM reason;
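+ /* Hard errors are raised as exceptions; the remaining codes are mapped to
+ * plain atoms for the Erlang side to interpret. */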
+ switch(code) {
+ case Z_OK:
+ reason = am_ok;
+ break;
+ case Z_STREAM_END:
+ reason = am_stream_end;
+ break;
+ case Z_ERRNO:
+ reason = enif_make_int(env, errno);
+ break;
+ case Z_STREAM_ERROR:
+ reason = enif_raise_exception(env, am_stream_error);
+ break;
+ case Z_DATA_ERROR:
+ reason = enif_raise_exception(env, am_data_error);
+ break;
+ case Z_MEM_ERROR:
+ reason = am_mem_error;
+ break;
+ case Z_BUF_ERROR:
+ reason = am_buf_error;
+ break;
+ case Z_VERSION_ERROR:
+ reason = am_version_error;
+ break;
+ default:
+ reason = am_unknown_error;
+ break;
+ }
+ return reason;
+}
+
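+/* Resource destructor; also called directly by close_nif to release the
+ * zlib state eagerly. */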
+static void gc_zlib(ErlNifEnv *env, void* data) {
+ zlib_data_t *d = (zlib_data_t*)data;
+
+ if(d->state == ST_DEFLATE) {
+ deflateEnd(&d->s);
+ } else if(d->state == ST_INFLATE) {
+ inflateEnd(&d->s);
+ }
+
+ if(d->state != ST_CLOSED) {
+ enif_ioq_destroy(d->input_queue);
+
+ if(d->stash_env != NULL) {
+ enif_free_env(d->stash_env);
+ }
+
+ d->state = ST_CLOSED;
+ }
+}
+
+static int get_zlib_data(ErlNifEnv *env, ERL_NIF_TERM opaque, zlib_data_t **d) {
+ return enif_get_resource(env, opaque, rtype_zlib, (void **)d);
+}
+
+static int zlib_process_check(ErlNifEnv *env, zlib_data_t *d) {
+ ErlNifPid current_process;
+
+ enif_self(env, &current_process);
+
+ return enif_is_identical(enif_make_pid(env, &current_process),
+ enif_make_pid(env, &d->controlling_process));
+}
+
+static void zlib_reset_input(zlib_data_t *d) {
+ enif_ioq_destroy(d->input_queue);
+ d->input_queue = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
+
+ if(d->stash_env != NULL) {
+ enif_free_env(d->stash_env);
+ d->stash_env = NULL;
+ d->stash_term = NIL;
+ }
+}
+
+static int zlib_flush_queue(int (*codec)(z_stream*, int), ErlNifEnv *env,
+ zlib_data_t *d, size_t input_limit, ErlNifBinary *output_buffer, int flush,
+ size_t *bytes_produced, size_t *bytes_consumed, size_t *bytes_remaining) {
+
+ int vec_len, vec_idx;
+ SysIOVec *input_vec;
+ int res;
+
+ input_vec = enif_ioq_peek(d->input_queue, &vec_len);
+ vec_idx = 0;
+ res = Z_OK;
+
+ *bytes_produced = 0;
+ *bytes_consumed = 0;
+
+ d->s.avail_out = output_buffer->size;
+ d->s.next_out = output_buffer->data;
+
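+ /* Walk the queued iovec entries until the codec reports something other
+ * than Z_OK, we hit the input limit, or the scheduler asks us to yield. */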
+ while(res == Z_OK && vec_idx < vec_len && *bytes_consumed < input_limit) {
+ size_t timeslice_percent, block_consumed, block_size;
+
+ block_size = MIN(input_vec[vec_idx].iov_len, input_limit);
+
+ d->s.next_in = input_vec[vec_idx].iov_base;
+ d->s.avail_in = block_size;
+
+ res = codec(&d->s, Z_NO_FLUSH);
+
+ ASSERT(d->s.avail_in == 0 || d->s.avail_out == 0 || res != Z_OK);
+
+ block_consumed = block_size - d->s.avail_in;
+ *bytes_consumed += block_consumed;
+
+ if(d->want_input_crc) {
+ d->input_crc =
+ crc32(d->input_crc, input_vec[vec_idx].iov_base, block_consumed);
+ }
+
+ timeslice_percent = (100 * block_consumed) / input_limit;
+ if(enif_consume_timeslice(env, MAX(1, timeslice_percent))) {
+ break;
+ }
+
+ vec_idx++;
+ }
+
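+ /* Dequeue what we actually consumed; anything left over stays queued for
+ * the next call. */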
+ if(!enif_ioq_deq(d->input_queue, *bytes_consumed, bytes_remaining)) {
+ *bytes_remaining = 0;
+ res = Z_BUF_ERROR;
+ }
+
+ if(res == Z_OK && flush != Z_NO_FLUSH && (*bytes_remaining == 0)) {
+ d->s.next_in = NULL;
+ d->s.avail_in = 0;
+
+ res = codec(&d->s, flush);
+ }
+
+ *bytes_produced = output_buffer->size - d->s.avail_out;
+
+ return res;
+}
+
+static ERL_NIF_TERM zlib_codec(int (*codec)(z_stream*, int),
+ ErlNifEnv *env, zlib_data_t *d,
+ int input_chunk_size,
+ int output_chunk_size,
+ int flush) {
+
+ size_t bytes_produced, bytes_consumed, bytes_remaining;
+ ErlNifBinary output_buffer;
+ int res;
+
+ if(!enif_alloc_binary(output_chunk_size, &output_buffer)) {
+ return zlib_return(env, Z_MEM_ERROR);
+ }
+
+ res = zlib_flush_queue(codec, env, d, input_chunk_size, &output_buffer,
+ flush, &bytes_produced, &bytes_consumed, &bytes_remaining);
+
+ if(res < 0 && res != Z_BUF_ERROR) {
+ enif_release_binary(&output_buffer);
+ return zlib_return(env, res);
+ }
+
+ if(res == Z_STREAM_END) {
+ d->eos_seen = 1;
+ }
+
+ if(d->want_output_crc) {
+ d->output_crc =
+ crc32(d->output_crc, output_buffer.data, bytes_produced);
+ }
+
+ if(bytes_consumed == 0 && bytes_produced == 0 && bytes_remaining != 0) {
+ /* Fail with a data error if we've made zero progress; this should not
+ * happen on well-formed input. */
+
+ enif_release_binary(&output_buffer);
+ return zlib_return(env, Z_DATA_ERROR);
+ } else {
+ ERL_NIF_TERM flushed_output;
+
+ if(bytes_produced > 0) {
+ if(bytes_produced < output_buffer.size) {
+ enif_realloc_binary(&output_buffer, bytes_produced);
+ }
+
+ flushed_output =
+ enif_make_list1(env, enif_make_binary(env, &output_buffer));
+ } else {
+ enif_release_binary(&output_buffer);
+ flushed_output = enif_make_list(env, 0);
+ }
+
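+ /* We are only finished once the queue has been drained and the last pass
+ * left spare room in the output buffer; otherwise the caller has to
+ * continue, or provide a dictionary if the codec asked for one. */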
+ if(bytes_remaining == 0 && bytes_produced < output_chunk_size) {
+ return enif_make_tuple2(env, am_finished, flushed_output);
+ } else if(res != Z_NEED_DICT) {
+ return enif_make_tuple2(env, am_continue, flushed_output);
+ }
+
+ return enif_make_tuple3(env, am_need_dictionary,
+ enif_make_int(env, d->s.adler), flushed_output);
+ }
+}
+
+/* zlib nifs */
+
+static ERL_NIF_TERM zlib_getStash(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ }
+
+ if(d->stash_env == NULL) {
+ return am_empty;
+ }
+
+ return enif_make_tuple2(env, am_ok, enif_make_copy(env, d->stash_term));
+}
+
+static ERL_NIF_TERM zlib_clearStash(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->stash_env == NULL) {
+ return enif_raise_exception(env, am_error);
+ }
+
+ enif_free_env(d->stash_env);
+ d->stash_env = NULL;
+ d->stash_term = NIL;
+
+ return am_ok;
+}
+
+static ERL_NIF_TERM zlib_setStash(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ if(argc != 2 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->stash_env != NULL) {
+ return enif_raise_exception(env, am_error);
+ }
+
+ d->stash_env = enif_alloc_env();
+ d->stash_term = enif_make_copy(d->stash_env, argv[1]);
+
+ return am_ok;
+}
+
+static ERL_NIF_TERM zlib_open(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ ERL_NIF_TERM result;
+
+ d = (zlib_data_t *) enif_alloc_resource(rtype_zlib, sizeof(zlib_data_t));
+
+ memset(&d->s, 0, sizeof(z_stream));
+
+ enif_self(env, &d->controlling_process);
+
+ d->input_queue = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
+
+ d->s.zalloc = zlib_alloc;
+ d->s.zfree = zlib_free;
+ d->s.opaque = d;
+ d->s.data_type = Z_BINARY;
+
+ d->state = ST_NONE;
+ d->eos_seen = 0;
+
+ d->want_output_crc = 0;
+ d->want_input_crc = 0;
+ d->is_raw_stream = 0;
+
+ d->output_crc = crc32(0L, Z_NULL, 0);
+ d->input_crc = crc32(0L, Z_NULL, 0);
+
+ d->stash_env = NULL;
+ d->stash_term = NIL;
+
+ d->inflateChunk_buffer_size = 4000;
+
+ result = enif_make_resource(env, d);
+ enif_release_resource(d);
+
+ return result;
+}
+
+static ERL_NIF_TERM zlib_close(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ /* Strictly speaking this is not needed, since the resource destructor
+ * will clean up when the term is garbage collected. */
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state == ST_CLOSED) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ gc_zlib(env, d);
+
+ return am_ok;
+}
+
+/* deflate */
+
+static ERL_NIF_TERM zlib_deflateInit(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int level, res;
+
+ if(argc != 2 || !get_zlib_data(env, argv[0], &d) ||
+ !enif_get_int(env, argv[1], &level)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_NONE) {
+ return enif_raise_exception(env, am_already_initialized);
+ }
+
+ res = deflateInit(&d->s, level);
+
+ if(res == Z_OK) {
+ d->state = ST_DEFLATE;
+ d->eos_seen = 0;
+
+ /* FIXME: crc32/1 is documented as returning "the current calculated
+ * checksum," but fails to mention that the old implementation only
+ * calculated it when WindowBits < 0 (See zlib_deflateInit2).
+ *
+ * We could fix this behavior by setting d->want_input_crc to 1 here,
+ * but we've decided to retain this quirk since the performance hit is
+ * quite significant. */
+ d->want_output_crc = 0;
+ d->want_input_crc = 0;
+
+ d->output_crc = crc32(0L, Z_NULL, 0);
+ d->input_crc = crc32(0L, Z_NULL, 0);
+ }
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_deflateInit2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int level, method, windowBits, memLevel, strategy, res;
+
+ if(argc != 6 || !get_zlib_data(env, argv[0], &d)
+ || !enif_get_int(env, argv[1], &level)
+ || !enif_get_int(env, argv[2], &method)
+ || !enif_get_int(env, argv[3], &windowBits)
+ || !enif_get_int(env, argv[4], &memLevel)
+ || !enif_get_int(env, argv[5], &strategy)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_NONE) {
+ return enif_raise_exception(env, am_already_initialized);
+ }
+
+ res = deflateInit2(&d->s, level, method, windowBits, memLevel, strategy);
+
+ if(res == Z_OK) {
+ d->state = ST_DEFLATE;
+ d->eos_seen = 0;
+
+ d->is_raw_stream = (windowBits < 0);
+
+ d->want_output_crc = 0;
+ d->want_input_crc = d->is_raw_stream;
+
+ d->output_crc = crc32(0L, Z_NULL, 0);
+ d->input_crc = crc32(0L, Z_NULL, 0);
+ }
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_deflateSetDictionary(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ ErlNifBinary bin;
+ int res;
+
+ if(argc != 2 || !get_zlib_data(env, argv[0], &d)
+ || !enif_inspect_iolist_as_binary(env, argv[1], &bin)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_DEFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ if((res = deflateSetDictionary(&d->s, bin.data, bin.size)) == Z_OK) {
+ uLong checksum = d->s.adler;
+
+ /* d->s.adler is not updated in raw deflate mode, so we'll calculate it
+ * ourselves in case the user wants to rely on that behavior. */
+ if(d->is_raw_stream) {
+ checksum = adler32(0, bin.data, bin.size);
+ }
+
+ return enif_make_int(env, checksum);
+ }
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_deflateReset(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int res;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_DEFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ res = deflateReset(&d->s);
+
+ d->input_crc = crc32(0L, Z_NULL, 0);
+ d->eos_seen = 0;
+
+ zlib_reset_input(d);
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_deflateEnd(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int res;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_DEFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ res = deflateEnd(&d->s);
+
+ if(res == Z_OK && enif_ioq_size(d->input_queue) > 0) {
+ res = Z_DATA_ERROR;
+ }
+
+ zlib_reset_input(d);
+ d->state = ST_NONE;
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_deflateParams(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int res, level, strategy;
+
+ if(argc != 3 || !get_zlib_data(env, argv[0], &d)
+ || !enif_get_int(env, argv[1], &level)
+ || !enif_get_int(env, argv[2], &strategy)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_DEFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ /* deflateParams will flush everything currently in the stream, corrupting
+ * the heap unless it's empty. We therefore pretend to have a full output
+ * buffer, forcing a Z_BUF_ERROR if there's anything left to be flushed. */
+ d->s.avail_out = 0;
+ res = deflateParams(&d->s, level, strategy);
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_deflate(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ int input_chunk_size, output_chunk_size, flush;
+
+ if(argc != 4 || !get_zlib_data(env, argv[0], &d)
+ || !enif_get_int(env, argv[1], &input_chunk_size)
+ || !enif_get_int(env, argv[2], &output_chunk_size)
+ || !enif_get_int(env, argv[3], &flush)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_DEFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ return zlib_codec(&deflate, env, d, input_chunk_size, output_chunk_size, flush);
+}
+
+/* inflate */
+
+static ERL_NIF_TERM zlib_inflateInit(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int res;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_NONE) {
+ return enif_raise_exception(env, am_already_initialized);
+ }
+
+ res = inflateInit(&d->s);
+
+ if(res == Z_OK) {
+ d->state = ST_INFLATE;
+ d->eos_seen = 0;
+
+ d->want_output_crc = 0;
+ d->want_input_crc = 0;
+ d->is_raw_stream = 0;
+
+ d->output_crc = crc32(0L, Z_NULL, 0);
+ d->input_crc = crc32(0L, Z_NULL, 0);
+ }
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_inflateInit2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int windowBits, res;
+
+ if(argc != 2 || !get_zlib_data(env, argv[0], &d)
+ || !enif_get_int(env, argv[1], &windowBits)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_NONE) {
+ return enif_raise_exception(env, am_already_initialized);
+ }
+
+ res = inflateInit2(&d->s, windowBits);
+
+ if(res == Z_OK) {
+ d->state = ST_INFLATE;
+ d->eos_seen = 0;
+
+ d->is_raw_stream = (windowBits < 0);
+
+ d->want_output_crc = d->is_raw_stream;
+ d->want_input_crc = 0;
+
+ d->output_crc = crc32(0L, Z_NULL, 0);
+ d->input_crc = crc32(0L, Z_NULL, 0);
+ }
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_inflateSetDictionary(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ ErlNifBinary bin;
+ int res;
+
+ if(argc != 2 || !get_zlib_data(env, argv[0], &d)
+ || !enif_inspect_iolist_as_binary(env, argv[1], &bin)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_INFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ res = inflateSetDictionary(&d->s, bin.data, bin.size);
+
+ return zlib_return(env, res);
+}
+
+#ifdef HAVE_ZLIB_INFLATEGETDICTIONARY
+/* Work around a broken build system by checking the zlib version at runtime. */
+static int zlib_supports_inflateGetDictionary(void) {
+ static int supportsGetDictionary = -1;
+
+#if defined(__APPLE__) && defined(__MACH__)
+ if(supportsGetDictionary < 0) {
+ unsigned int v[4] = {0, 0, 0, 0};
+ unsigned hexver;
+
+ sscanf(zlibVersion(), "%u.%u.%u.%u", &v[0], &v[1], &v[2], &v[3]);
+
+ hexver = (v[0] << (8*3)) | (v[1] << (8*2)) | (v[2] << (8)) | v[3];
+ supportsGetDictionary = (hexver >= 0x1020701); /* 1.2.7.1 */
+ }
+#endif
+
+ return supportsGetDictionary;
+}
+#endif
+
+static ERL_NIF_TERM zlib_inflateGetDictionary(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_INFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+#ifdef HAVE_ZLIB_INFLATEGETDICTIONARY
+ if(zlib_supports_inflateGetDictionary()) {
+ ErlNifBinary obin;
+ uInt len;
+ int res;
+
+ enif_alloc_binary(INFL_DICT_SZ, &obin);
+ len = 0;
+
+ if((res = inflateGetDictionary(&d->s, obin.data, &len)) < 0) {
+ enif_release_binary(&obin);
+ return zlib_return(env, res);
+ }
+
+ enif_realloc_binary(&obin, (size_t)len);
+ return enif_make_binary(env, &obin);
+ }
+#endif
+
+ return enif_raise_exception(env, am_not_supported);
+}
+
+static ERL_NIF_TERM zlib_inflateReset(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int res;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_INFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ res = inflateReset(&d->s);
+
+ d->output_crc = crc32(0L, Z_NULL, 0);
+ d->eos_seen = 0;
+
+ zlib_reset_input(d);
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_inflateEnd(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+ int res;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_INFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ res = inflateEnd(&d->s);
+
+ if(res == Z_OK && (!d->eos_seen || enif_ioq_size(d->input_queue) > 0)) {
+ res = Z_DATA_ERROR;
+ }
+
+ zlib_reset_input(d);
+ d->state = ST_NONE;
+
+ return zlib_return(env, res);
+}
+
+static ERL_NIF_TERM zlib_inflate(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ int input_chunk_size, output_chunk_size, flush;
+
+ if(argc != 4 || !get_zlib_data(env, argv[0], &d)
+ || !enif_get_int(env, argv[1], &input_chunk_size)
+ || !enif_get_int(env, argv[2], &output_chunk_size)
+ || !enif_get_int(env, argv[3], &flush)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_INFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
+ return zlib_codec(&inflate, env, d, input_chunk_size, output_chunk_size, flush);
+}
+
+static ERL_NIF_TERM zlib_crc32(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ }
+
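+ /* The deprecated crc32/1 reports the checksum of the plaintext side:
+ * the input when deflating, the output when inflating. */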
+ if(d->state == ST_DEFLATE) {
+ return enif_make_ulong(env, d->input_crc);
+ } else if(d->state == ST_INFLATE) {
+ return enif_make_ulong(env, d->output_crc);
+ }
+
+ return enif_raise_exception(env, am_not_initialized);
+}
+
+static ERL_NIF_TERM zlib_getBufSize(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ if(argc != 1 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ }
+
+ return enif_make_int(env, d->inflateChunk_buffer_size);
+}
+
+static ERL_NIF_TERM zlib_setBufSize(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ if(argc != 2 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ }
+
+ if(!enif_get_int(env, argv[1], &d->inflateChunk_buffer_size)) {
+ return enif_make_badarg(env);
+ }
+
+ return am_ok;
+}
+
+static ERL_NIF_TERM zlib_enqueue_input(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
+ zlib_data_t *d;
+
+ ErlNifIOVec prealloc, *iovec = &prealloc;
+ ERL_NIF_TERM tail;
+
+ if(argc != 2 || !get_zlib_data(env, argv[0], &d)) {
+ return enif_make_badarg(env);
+ } else if(!zlib_process_check(env, d)) {
+ return enif_raise_exception(env, am_not_on_controlling_process);
+ } else if(d->state != ST_DEFLATE && d->state != ST_INFLATE) {
+ return enif_raise_exception(env, am_not_initialized);
+ }
+
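+ /* Inspect at most 256 iovec entries per call; a non-empty tail is handed
+ * back as {continue, Tail} so the caller can enqueue the rest later. */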
+ if(!enif_inspect_iovec(env, 256, argv[1], &tail, &iovec)) {
+ return enif_make_badarg(env);
+ } else if(!enif_ioq_enqv(d->input_queue, iovec, 0)) {
+ return enif_make_badarg(env);
+ }
+
+ if(!enif_is_empty_list(env, tail)) {
+ return enif_make_tuple2(env, am_continue, tail);
+ }
+
+ return am_ok;
+}
diff --git a/erts/emulator/pcre/local_config.h b/erts/emulator/pcre/local_config.h
index e90f4dcada..c6af423d72 100644
--- a/erts/emulator/pcre/local_config.h
+++ b/erts/emulator/pcre/local_config.h
@@ -86,4 +86,4 @@
#define SUPPORT_UTF
/* Version number of package */
-#define VERSION "8.40"
+#define VERSION "8.41"
diff --git a/erts/emulator/pcre/pcre-8.40.tar.bz2 b/erts/emulator/pcre/pcre-8.40.tar.bz2
deleted file mode 100644
index 6147917f4e..0000000000
--- a/erts/emulator/pcre/pcre-8.40.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/erts/emulator/pcre/pcre-8.41.tar.bz2 b/erts/emulator/pcre/pcre-8.41.tar.bz2
new file mode 100644
index 0000000000..1798432dc9
--- /dev/null
+++ b/erts/emulator/pcre/pcre-8.41.tar.bz2
Binary files differ
diff --git a/erts/emulator/pcre/pcre.h b/erts/emulator/pcre/pcre.h
index 9cbd9c0293..ab8f40cfc1 100644
--- a/erts/emulator/pcre/pcre.h
+++ b/erts/emulator/pcre/pcre.h
@@ -43,9 +43,9 @@ POSSIBILITY OF SUCH DAMAGE.
/* The current PCRE version information. */
#define PCRE_MAJOR 8
-#define PCRE_MINOR 40
+#define PCRE_MINOR 41
#define PCRE_PRERELEASE
-#define PCRE_DATE 2017-01-11
+#define PCRE_DATE 2017-07-05
/* When an application links to a PCRE DLL in Windows, the symbols that are
imported have to be identified as such. When building PCRE, the appropriate
diff --git a/erts/emulator/pcre/pcre_compile.c b/erts/emulator/pcre/pcre_compile.c
index 6e841c9cf8..e79284ab79 100644
--- a/erts/emulator/pcre/pcre_compile.c
+++ b/erts/emulator/pcre/pcre_compile.c
@@ -5740,6 +5740,21 @@ for (;; ptr++)
ptr = p - 1; /* Character before the next significant one. */
}
+ /* We also need to skip over (?# comments, which are not dependent on
+ extended mode. */
+
+ if (ptr[1] == CHAR_LEFT_PARENTHESIS && ptr[2] == CHAR_QUESTION_MARK &&
+ ptr[3] == CHAR_NUMBER_SIGN)
+ {
+ ptr += 4;
+ while (*ptr != CHAR_NULL && *ptr != CHAR_RIGHT_PARENTHESIS) ptr++;
+ if (*ptr == CHAR_NULL)
+ {
+ *errorcodeptr = ERR18;
+ goto FAILED;
+ }
+ }
+
/* If the next character is '+', we have a possessive quantifier. This
implies greediness, whatever the setting of the PCRE_UNGREEDY option.
If the next character is '?' this is a minimizing repeat, by default,
@@ -8211,7 +8226,6 @@ for (;; ptr++)
if (mclength == 1 || req_caseopt == 0)
{
- firstchar = mcbuffer[0] | req_caseopt;
firstchar = mcbuffer[0];
firstcharflags = req_caseopt;
diff --git a/erts/emulator/pcre/pcre_dfa_exec.c b/erts/emulator/pcre/pcre_dfa_exec.c
index 529f40685b..c859d67fc7 100644
--- a/erts/emulator/pcre/pcre_dfa_exec.c
+++ b/erts/emulator/pcre/pcre_dfa_exec.c
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language (but see
below for why this module is different).
Written by Philip Hazel
- Copyright (c) 1997-2014 University of Cambridge
+ Copyright (c) 1997-2017 University of Cambridge
-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -2626,7 +2626,7 @@ for (;;)
if (isinclass)
{
int max = (int)GET2(ecode, 1 + IMM2_SIZE);
- if (*ecode == OP_CRPOSRANGE)
+ if (*ecode == OP_CRPOSRANGE && count >= (int)GET2(ecode, 1))
{
active_count--; /* Remove non-match possibility */
next_active_state--;
diff --git a/erts/emulator/pcre/pcre_exec.c b/erts/emulator/pcre/pcre_exec.c
index 0f682d3daf..6708ba92a6 100644
--- a/erts/emulator/pcre/pcre_exec.c
+++ b/erts/emulator/pcre/pcre_exec.c
@@ -755,7 +755,7 @@ if (ecode == NULL)
return match((PCRE_PUCHAR)&rdepth, NULL, NULL, 0, NULL, NULL, 1);
else
{
- int len = (char *)&rdepth - (char *)eptr;
+ int len = (int)((char *)&rdepth - (char *)eptr);
return (len > 0)? -len : len;
}
}
diff --git a/erts/emulator/pcre/pcre_internal.h b/erts/emulator/pcre/pcre_internal.h
index cc4f171438..c84dcb5a38 100644
--- a/erts/emulator/pcre/pcre_internal.h
+++ b/erts/emulator/pcre/pcre_internal.h
@@ -2791,6 +2791,9 @@ extern const pcre_uint8 PRIV(ucd_stage1)[];
extern const pcre_uint16 PRIV(ucd_stage2)[];
extern const pcre_uint32 PRIV(ucp_gentype)[];
extern const pcre_uint32 PRIV(ucp_gbtable)[];
+#ifdef COMPILE_PCRE32
+extern const ucd_record PRIV(dummy_ucd_record)[];
+#endif
#ifdef SUPPORT_JIT
extern const int PRIV(ucp_typerange)[];
#endif
@@ -2799,10 +2802,16 @@ extern const int PRIV(ucp_typerange)[];
/* UCD access macros */
#define UCD_BLOCK_SIZE 128
-#define GET_UCD(ch) (PRIV(ucd_records) + \
+#define REAL_GET_UCD(ch) (PRIV(ucd_records) + \
PRIV(ucd_stage2)[PRIV(ucd_stage1)[(int)(ch) / UCD_BLOCK_SIZE] * \
UCD_BLOCK_SIZE + (int)(ch) % UCD_BLOCK_SIZE])
+#ifdef COMPILE_PCRE32
+#define GET_UCD(ch) ((ch > 0x10ffff)? PRIV(dummy_ucd_record) : REAL_GET_UCD(ch))
+#else
+#define GET_UCD(ch) REAL_GET_UCD(ch)
+#endif
+
#define UCD_CHARTYPE(ch) GET_UCD(ch)->chartype
#define UCD_SCRIPT(ch) GET_UCD(ch)->script
#define UCD_CATEGORY(ch) PRIV(ucp_gentype)[UCD_CHARTYPE(ch)]
diff --git a/erts/emulator/pcre/pcre_jit_compile.c b/erts/emulator/pcre/pcre_jit_compile.c
index 89400498f0..932ca2c389 100644
--- a/erts/emulator/pcre/pcre_jit_compile.c
+++ b/erts/emulator/pcre/pcre_jit_compile.c
@@ -487,7 +487,7 @@ typedef struct compare_context {
#undef CMP
/* Used for accessing the elements of the stack. */
-#define STACK(i) ((-(i) - 1) * (int)sizeof(sljit_sw))
+#define STACK(i) ((i) * (int)sizeof(sljit_sw))
#define TMP1 SLJIT_R0
#define TMP2 SLJIT_R2
@@ -552,13 +552,15 @@ the start pointers when the end of the capturing group has not yet reached. */
sljit_emit_cmp(compiler, (type), (src1), (src1w), (src2), (src2w))
#define CMPTO(type, src1, src1w, src2, src2w, label) \
sljit_set_label(sljit_emit_cmp(compiler, (type), (src1), (src1w), (src2), (src2w)), (label))
-#define OP_FLAGS(op, dst, dstw, src, srcw, type) \
- sljit_emit_op_flags(compiler, (op), (dst), (dstw), (src), (srcw), (type))
+#define OP_FLAGS(op, dst, dstw, type) \
+ sljit_emit_op_flags(compiler, (op), (dst), (dstw), (type))
#define GET_LOCAL_BASE(dst, dstw, offset) \
sljit_get_local_base(compiler, (dst), (dstw), (offset))
#define READ_CHAR_MAX 0x7fffffff
+#define INVALID_UTF_CHAR 888
+
static pcre_uchar *bracketend(pcre_uchar *cc)
{
SLJIT_ASSERT((*cc >= OP_ASSERT && *cc <= OP_ASSERTBACK_NOT) || (*cc >= OP_ONCE && *cc <= OP_SCOND));
@@ -784,7 +786,7 @@ switch(*cc)
default:
/* All opcodes are supported now! */
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
return NULL;
}
}
@@ -1660,9 +1662,9 @@ while (cc < ccend)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0));
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0));
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
setsom_found = TRUE;
}
cc += 1;
@@ -1676,9 +1678,9 @@ while (cc < ccend)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
setmark_found = TRUE;
}
cc += 1 + 2 + cc[1];
@@ -1689,27 +1691,27 @@ while (cc < ccend)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0));
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0));
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
setsom_found = TRUE;
}
if (common->mark_ptr != 0 && !setmark_found)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
setmark_found = TRUE;
}
if (common->capture_last_ptr != 0 && !capture_last_found)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
capture_last_found = TRUE;
}
cc += 1 + LINK_SIZE;
@@ -1723,20 +1725,20 @@ while (cc < ccend)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
capture_last_found = TRUE;
}
offset = (GET2(cc, 1 + LINK_SIZE)) << 1;
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, OVECTOR(offset));
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset));
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1));
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP2, 0);
- stackpos += (int)sizeof(sljit_sw);
+ stackpos -= (int)sizeof(sljit_sw);
cc += 1 + LINK_SIZE + IMM2_SIZE;
break;
@@ -1887,18 +1889,17 @@ BOOL tmp1empty = TRUE;
BOOL tmp2empty = TRUE;
pcre_uchar *alternative;
enum {
- start,
loop,
end
} status;
-status = save ? start : loop;
-stackptr = STACK(stackptr - 2);
+status = loop;
+stackptr = STACK(stackptr);
stacktop = STACK(stacktop - 1);
if (!save)
{
- stackptr += (needs_control_head ? 2 : 1) * sizeof(sljit_sw);
+ stacktop -= (needs_control_head ? 2 : 1) * sizeof(sljit_sw);
if (stackptr < stacktop)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), stackptr);
@@ -1914,196 +1915,186 @@ if (!save)
/* The tmp1next must be TRUE in either way. */
}
+SLJIT_ASSERT(common->recursive_head_ptr != 0);
+
do
{
count = 0;
- switch(status)
+ if (cc >= ccend)
{
- case start:
- SLJIT_ASSERT(save && common->recursive_head_ptr != 0);
+ if (!save)
+ break;
+
count = 1;
srcw[0] = common->recursive_head_ptr;
if (needs_control_head)
{
SLJIT_ASSERT(common->control_head_ptr != 0);
count = 2;
- srcw[1] = common->control_head_ptr;
+ srcw[0] = common->control_head_ptr;
+ srcw[1] = common->recursive_head_ptr;
+ }
+ status = end;
+ }
+ else switch(*cc)
+ {
+ case OP_KET:
+ if (PRIVATE_DATA(cc) != 0)
+ {
+ count = 1;
+ srcw[0] = PRIVATE_DATA(cc);
+ SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
+ cc += PRIVATE_DATA(cc + 1);
}
- status = loop;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_ASSERT:
+ case OP_ASSERT_NOT:
+ case OP_ASSERTBACK:
+ case OP_ASSERTBACK_NOT:
+ case OP_ONCE:
+ case OP_ONCE_NC:
+ case OP_BRAPOS:
+ case OP_SBRA:
+ case OP_SBRAPOS:
+ case OP_SCOND:
+ count = 1;
+ srcw[0] = PRIVATE_DATA(cc);
+ SLJIT_ASSERT(srcw[0] != 0);
+ cc += 1 + LINK_SIZE;
break;
- case loop:
- if (cc >= ccend)
+ case OP_CBRA:
+ case OP_SCBRA:
+ if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
{
- status = end;
- break;
+ count = 1;
+ srcw[0] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
}
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
- switch(*cc)
- {
- case OP_KET:
- if (PRIVATE_DATA(cc) != 0)
- {
- count = 1;
- srcw[0] = PRIVATE_DATA(cc);
- SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
- cc += PRIVATE_DATA(cc + 1);
- }
- cc += 1 + LINK_SIZE;
- break;
+ case OP_CBRAPOS:
+ case OP_SCBRAPOS:
+ count = 2;
+ srcw[0] = PRIVATE_DATA(cc);
+ srcw[1] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
+ SLJIT_ASSERT(srcw[0] != 0 && srcw[1] != 0);
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
- case OP_ASSERT:
- case OP_ASSERT_NOT:
- case OP_ASSERTBACK:
- case OP_ASSERTBACK_NOT:
- case OP_ONCE:
- case OP_ONCE_NC:
- case OP_BRAPOS:
- case OP_SBRA:
- case OP_SBRAPOS:
- case OP_SCOND:
+ case OP_COND:
+ /* Might be a hidden SCOND. */
+ alternative = cc + GET(cc, 1);
+ if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
+ {
count = 1;
srcw[0] = PRIVATE_DATA(cc);
SLJIT_ASSERT(srcw[0] != 0);
- cc += 1 + LINK_SIZE;
- break;
-
- case OP_CBRA:
- case OP_SCBRA:
- if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
- {
- count = 1;
- srcw[0] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
- }
- cc += 1 + LINK_SIZE + IMM2_SIZE;
- break;
+ }
+ cc += 1 + LINK_SIZE;
+ break;
- case OP_CBRAPOS:
- case OP_SCBRAPOS:
- count = 2;
+ CASE_ITERATOR_PRIVATE_DATA_1
+ if (PRIVATE_DATA(cc))
+ {
+ count = 1;
srcw[0] = PRIVATE_DATA(cc);
- srcw[1] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
- SLJIT_ASSERT(srcw[0] != 0 && srcw[1] != 0);
- cc += 1 + LINK_SIZE + IMM2_SIZE;
- break;
-
- case OP_COND:
- /* Might be a hidden SCOND. */
- alternative = cc + GET(cc, 1);
- if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
- {
- count = 1;
- srcw[0] = PRIVATE_DATA(cc);
- SLJIT_ASSERT(srcw[0] != 0);
- }
- cc += 1 + LINK_SIZE;
- break;
-
- CASE_ITERATOR_PRIVATE_DATA_1
- if (PRIVATE_DATA(cc))
- {
- count = 1;
- srcw[0] = PRIVATE_DATA(cc);
- }
- cc += 2;
+ }
+ cc += 2;
#ifdef SUPPORT_UTF
- if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
- break;
+ break;
- CASE_ITERATOR_PRIVATE_DATA_2A
- if (PRIVATE_DATA(cc))
- {
- count = 2;
- srcw[0] = PRIVATE_DATA(cc);
- srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
- }
- cc += 2;
+ CASE_ITERATOR_PRIVATE_DATA_2A
+ if (PRIVATE_DATA(cc))
+ {
+ count = 2;
+ srcw[0] = PRIVATE_DATA(cc);
+ srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
+ }
+ cc += 2;
#ifdef SUPPORT_UTF
- if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
- break;
+ break;
- CASE_ITERATOR_PRIVATE_DATA_2B
- if (PRIVATE_DATA(cc))
- {
- count = 2;
- srcw[0] = PRIVATE_DATA(cc);
- srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
- }
- cc += 2 + IMM2_SIZE;
+ CASE_ITERATOR_PRIVATE_DATA_2B
+ if (PRIVATE_DATA(cc))
+ {
+ count = 2;
+ srcw[0] = PRIVATE_DATA(cc);
+ srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
+ }
+ cc += 2 + IMM2_SIZE;
#ifdef SUPPORT_UTF
- if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
#endif
- break;
+ break;
- CASE_ITERATOR_TYPE_PRIVATE_DATA_1
- if (PRIVATE_DATA(cc))
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_1
+ if (PRIVATE_DATA(cc))
+ {
+ count = 1;
+ srcw[0] = PRIVATE_DATA(cc);
+ }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
+ if (PRIVATE_DATA(cc))
+ {
+ count = 2;
+ srcw[0] = PRIVATE_DATA(cc);
+ srcw[1] = srcw[0] + sizeof(sljit_sw);
+ }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
+ if (PRIVATE_DATA(cc))
+ {
+ count = 2;
+ srcw[0] = PRIVATE_DATA(cc);
+ srcw[1] = srcw[0] + sizeof(sljit_sw);
+ }
+ cc += 1 + IMM2_SIZE;
+ break;
+
+ case OP_CLASS:
+ case OP_NCLASS:
+#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
+ case OP_XCLASS:
+ size = (*cc == OP_XCLASS) ? GET(cc, 1) : 1 + 32 / (int)sizeof(pcre_uchar);
+#else
+ size = 1 + 32 / (int)sizeof(pcre_uchar);
+#endif
+ if (PRIVATE_DATA(cc))
+ switch(get_class_iterator_size(cc + size))
{
+ case 1:
count = 1;
srcw[0] = PRIVATE_DATA(cc);
- }
- cc += 1;
- break;
+ break;
- CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
- if (PRIVATE_DATA(cc))
- {
+ case 2:
count = 2;
srcw[0] = PRIVATE_DATA(cc);
srcw[1] = srcw[0] + sizeof(sljit_sw);
- }
- cc += 1;
- break;
+ break;
- CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
- if (PRIVATE_DATA(cc))
- {
- count = 2;
- srcw[0] = PRIVATE_DATA(cc);
- srcw[1] = srcw[0] + sizeof(sljit_sw);
+ default:
+ SLJIT_UNREACHABLE();
+ break;
}
- cc += 1 + IMM2_SIZE;
- break;
-
- case OP_CLASS:
- case OP_NCLASS:
-#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
- case OP_XCLASS:
- size = (*cc == OP_XCLASS) ? GET(cc, 1) : 1 + 32 / (int)sizeof(pcre_uchar);
-#else
- size = 1 + 32 / (int)sizeof(pcre_uchar);
-#endif
- if (PRIVATE_DATA(cc))
- switch(get_class_iterator_size(cc + size))
- {
- case 1:
- count = 1;
- srcw[0] = PRIVATE_DATA(cc);
- break;
-
- case 2:
- count = 2;
- srcw[0] = PRIVATE_DATA(cc);
- srcw[1] = srcw[0] + sizeof(sljit_sw);
- break;
-
- default:
- SLJIT_ASSERT_STOP();
- break;
- }
- cc += size;
- break;
-
- default:
- cc = next_opcode(common, cc);
- SLJIT_ASSERT(cc != NULL);
- break;
- }
+ cc += size;
break;
- case end:
- SLJIT_ASSERT_STOP();
+ default:
+ cc = next_opcode(common, cc);
+ SLJIT_ASSERT(cc != NULL);
break;
}
@@ -2312,7 +2303,7 @@ static SLJIT_INLINE void count_match(compiler_common *common)
{
DEFINE_COMPILER;
-OP2(SLJIT_SUB | SLJIT_SET_E, COUNT_MATCH, 0, COUNT_MATCH, 0, SLJIT_IMM, 1);
+OP2(SLJIT_SUB | SLJIT_SET_Z, COUNT_MATCH, 0, COUNT_MATCH, 0, SLJIT_IMM, 1);
add_jump(compiler, &common->calllimit, JUMP(SLJIT_ZERO));
}
@@ -2322,7 +2313,7 @@ static SLJIT_INLINE void allocate_stack(compiler_common *common, int size)
DEFINE_COMPILER;
SLJIT_ASSERT(size > 0);
-OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw));
+OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw));
#ifdef DESTROY_REGISTERS
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 12345);
OP1(SLJIT_MOV, TMP3, 0, TMP1, 0);
@@ -2330,7 +2321,7 @@ OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, TMP1, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP1, 0);
#endif
-add_stub(common, CMP(SLJIT_GREATER, STACK_TOP, 0, STACK_LIMIT, 0));
+add_stub(common, CMP(SLJIT_LESS, STACK_TOP, 0, STACK_LIMIT, 0));
}
static SLJIT_INLINE void free_stack(compiler_common *common, int size)
@@ -2338,7 +2329,7 @@ static SLJIT_INLINE void free_stack(compiler_common *common, int size)
DEFINE_COMPILER;
SLJIT_ASSERT(size > 0);
-OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw));
+OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw));
}
static sljit_uw * allocate_read_only_data(compiler_common *common, sljit_uw size)
@@ -2396,7 +2387,7 @@ else
OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
loop = LABEL();
OP1(SLJIT_MOVU, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw), SLJIT_R0, 0);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, loop);
}
}
@@ -2434,7 +2425,7 @@ else
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
loop = LABEL();
OP1(SLJIT_MOVU, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0);
- OP2(SLJIT_SUB | SLJIT_SET_E, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, loop);
}
@@ -2452,22 +2443,22 @@ static sljit_sw SLJIT_CALL do_search_mark(sljit_sw *current, const pcre_uchar *s
{
while (current != NULL)
{
- switch (current[-2])
+ switch (current[1])
{
case type_then_trap:
break;
case type_mark:
- if (STRCMP_UC_UC(skip_arg, (pcre_uchar *)current[-3]) == 0)
- return current[-4];
+ if (STRCMP_UC_UC(skip_arg, (pcre_uchar *)current[2]) == 0)
+ return current[3];
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
- SLJIT_ASSERT(current > (sljit_sw*)current[-1]);
- current = (sljit_sw*)current[-1];
+ SLJIT_ASSERT(current[0] == 0 || current < (sljit_sw*)current[0]);
+ current = (sljit_sw*)current[0];
}
return -1;
}
@@ -2501,7 +2492,7 @@ OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw));
OP2(SLJIT_ASHR, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
OP1(SLJIT_MOVU_S32, SLJIT_MEM1(SLJIT_R2), sizeof(int), SLJIT_S1, 0);
-OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
+OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, loop);
JUMPHERE(early_quit);
@@ -3106,8 +3097,8 @@ if (common->utf)
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
/* Skip low surrogate if necessary. */
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xdc00);
- OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xdc00);
+ OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
return;
@@ -3126,6 +3117,7 @@ struct sljit_jump *jump;
if (nltype == NLTYPE_ANY)
{
add_jump(compiler, &common->anynewline, JUMP(SLJIT_FAST_CALL));
+ sljit_set_current_flags(compiler, SLJIT_SET_Z);
add_jump(compiler, backtracks, JUMP(jumpifmatch ? SLJIT_NOT_ZERO : SLJIT_ZERO));
}
else if (nltype == NLTYPE_ANYCRLF)
@@ -3167,7 +3159,7 @@ OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f);
OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0);
/* Searching for the first zero. */
-OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x800);
+OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x800);
jump = JUMP(SLJIT_NOT_ZERO);
/* Two byte sequence. */
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
@@ -3181,7 +3173,7 @@ OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 6);
OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f);
OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0);
-OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x10000);
+OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x10000);
jump = JUMP(SLJIT_NOT_ZERO);
/* Three byte sequence. */
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2));
@@ -3215,15 +3207,15 @@ OP2(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_IMM, 0x3f);
OP2(SLJIT_OR, TMP1, 0, TMP1, 0, TMP2, 0);
/* Searching for the first zero. */
-OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x800);
+OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x800);
jump = JUMP(SLJIT_NOT_ZERO);
/* Two byte sequence. */
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
JUMPHERE(jump);
-OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x400);
-OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_NOT_ZERO);
+OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x400);
+OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_NOT_ZERO);
/* This code runs only in 8 bit mode. No need to shift the value. */
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP2, 0);
OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
@@ -3246,7 +3238,7 @@ struct sljit_jump *compare;
sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
-OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, 0x20);
+OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, 0x20);
jump = JUMP(SLJIT_NOT_ZERO);
/* Two byte sequence. */
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0));
@@ -3287,10 +3279,30 @@ static void do_getucd(compiler_common *common)
/* Search the UCD record for the character comes in TMP1.
Returns chartype in TMP1 and UCD offset in TMP2. */
DEFINE_COMPILER;
+#ifdef COMPILE_PCRE32
+struct sljit_jump *jump;
+#endif
+
+#if defined SLJIT_DEBUG && SLJIT_DEBUG
+/* dummy_ucd_record */
+const ucd_record *record = GET_UCD(INVALID_UTF_CHAR);
+SLJIT_ASSERT(record->script == ucp_Common && record->chartype == ucp_Cn && record->gbprop == ucp_gbOther);
+SLJIT_ASSERT(record->caseset == 0 && record->other_case == 0);
+#endif
SLJIT_ASSERT(UCD_BLOCK_SIZE == 128 && sizeof(ucd_record) == 8);
sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
+
+#ifdef COMPILE_PCRE32
+if (!common->utf)
+ {
+ jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0x10ffff + 1);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR);
+ JUMPHERE(jump);
+ }
+#endif
+
OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK);
@@ -3365,8 +3377,8 @@ if (newlinecheck)
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
end = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0);
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, common->newline & 0xff);
- OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, common->newline & 0xff);
+ OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
#if defined COMPILE_PCRE16 || defined COMPILE_PCRE32
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
@@ -3403,8 +3415,8 @@ if (common->utf)
{
singlechar = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800);
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800);
- OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800);
+ OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
JUMPHERE(singlechar);
@@ -3853,7 +3865,7 @@ while (TRUE)
}
}
-#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) && !(defined SUPPORT_VALGRIND)
static sljit_s32 character_to_int32(pcre_uchar chr)
{
@@ -4019,6 +4031,7 @@ instruction[0] = 0x0f;
instruction[1] = 0xbc;
instruction[2] = 0xc0 | (tmp1_ind << 3) | tmp1_ind;
sljit_emit_op_custom(compiler, instruction, 3);
+sljit_set_current_flags(compiler, SLJIT_SET_Z);
nomatch = JUMP(SLJIT_ZERO);
@@ -4119,6 +4132,7 @@ instruction[0] = 0x0f;
instruction[1] = 0xbc;
instruction[2] = 0xc0 | (tmp1_ind << 3) | tmp1_ind;
sljit_emit_op_custom(compiler, instruction, 3);
+sljit_set_current_flags(compiler, SLJIT_SET_Z);
JUMPTO(SLJIT_ZERO, start);
@@ -4155,18 +4169,8 @@ if (has_match_end)
OP1(SLJIT_MOV, TMP3, 0, STR_END, 0);
OP2(SLJIT_ADD, STR_END, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr, SLJIT_IMM, IN_UCHARS(offset + 1));
-#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
- if (sljit_x86_is_cmov_available())
- {
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, STR_END, 0, TMP3, 0);
- sljit_x86_emit_cmov(compiler, SLJIT_GREATER, STR_END, TMP3, 0);
- }
-#endif
- {
- quit = CMP(SLJIT_LESS_EQUAL, STR_END, 0, TMP3, 0);
- OP1(SLJIT_MOV, STR_END, 0, TMP3, 0);
- JUMPHERE(quit);
- }
+ OP2(SLJIT_SUB | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, STR_END, 0, TMP3, 0);
+ sljit_emit_cmov(compiler, SLJIT_GREATER, STR_END, TMP3, 0);
}
#if defined SUPPORT_UTF && !defined COMPILE_PCRE32
@@ -4174,11 +4178,11 @@ if (common->utf && offset > 0)
utf_start = LABEL();
#endif
-#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86)
+#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) && !(defined SUPPORT_VALGRIND)
/* SSE2 accelerated first character search. */
-if (sljit_x86_is_sse2_available())
+if (sljit_has_cpu_feature(SLJIT_HAS_SSE2))
{
fast_forward_first_char2_sse2(common, char1, char2);
@@ -4213,16 +4217,16 @@ if (sljit_x86_is_sse2_available())
if (offset > 0)
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(offset));
}
- else if (sljit_x86_is_cmov_available())
- {
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, STR_PTR, 0, STR_END, 0);
- sljit_x86_emit_cmov(compiler, SLJIT_GREATER_EQUAL, STR_PTR, has_match_end ? SLJIT_MEM1(SLJIT_SP) : STR_END, has_match_end ? common->match_end_ptr : 0);
- }
else
{
- quit = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0);
- OP1(SLJIT_MOV, STR_PTR, 0, has_match_end ? SLJIT_MEM1(SLJIT_SP) : STR_END, has_match_end ? common->match_end_ptr : 0);
- JUMPHERE(quit);
+ OP2(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, SLJIT_UNUSED, 0, STR_PTR, 0, STR_END, 0);
+ if (has_match_end)
+ {
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr);
+ sljit_emit_cmov(compiler, SLJIT_GREATER_EQUAL, STR_PTR, TMP1, 0);
+ }
+ else
+ sljit_emit_cmov(compiler, SLJIT_GREATER_EQUAL, STR_PTR, STR_END, 0);
}
if (has_match_end)
@@ -4249,10 +4253,10 @@ else
}
else
{
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, char1);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, char2);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, char1);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, char2);
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
found = JUMP(SLJIT_NOT_ZERO);
}
}
@@ -4571,8 +4575,8 @@ if (common->nltype == NLTYPE_FIXED && common->newline > 255)
firstchar = CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP2, 0);
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(2));
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, STR_PTR, 0, TMP1, 0);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_GREATER_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, SLJIT_UNUSED, 0, STR_PTR, 0, TMP1, 0);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER_EQUAL);
#if defined COMPILE_PCRE16 || defined COMPILE_PCRE32
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
@@ -4616,8 +4620,8 @@ if (common->nltype == NLTYPE_ANY || common->nltype == NLTYPE_ANYCRLF)
JUMPHERE(foundcr);
notfoundnl = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0);
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), 0);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_NL);
- OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, CHAR_NL);
+ OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
#if defined COMPILE_PCRE16 || defined COMPILE_PCRE32
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
@@ -4670,7 +4674,7 @@ if (!check_class_ranges(common, start_bits, (start_bits[31] & 0x80) != 0, TRUE,
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)start_bits);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
found = JUMP(SLJIT_NOT_ZERO);
}
@@ -4692,8 +4696,8 @@ if (common->utf)
{
CMPTO(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800, start);
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800);
- OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800);
+ OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
}
@@ -4780,31 +4784,31 @@ struct sljit_jump *jump;
struct sljit_label *mainloop;
sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
-OP1(SLJIT_MOV, TMP1, 0, STACK_TOP, 0);
-GET_LOCAL_BASE(TMP3, 0, 0);
+OP1(SLJIT_MOV, TMP3, 0, STACK_TOP, 0);
+GET_LOCAL_BASE(TMP1, 0, 0);
/* Drop frames until we reach STACK_TOP. */
mainloop = LABEL();
-OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), 0);
-OP2(SLJIT_SUB | SLJIT_SET_S, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, 0);
-jump = JUMP(SLJIT_SIG_LESS_EQUAL);
-
-OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP3, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(TMP1), sizeof(sljit_sw));
-OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(TMP1), 2 * sizeof(sljit_sw));
-OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 3 * sizeof(sljit_sw));
+OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), -sizeof(sljit_sw));
+jump = CMP(SLJIT_SIG_LESS_EQUAL, TMP2, 0, SLJIT_IMM, 0);
+
+OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
+OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -2 * sizeof(sljit_sw));
+OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(STACK_TOP), -3 * sizeof(sljit_sw));
+OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * sizeof(sljit_sw));
JUMPTO(SLJIT_JUMP, mainloop);
JUMPHERE(jump);
-jump = JUMP(SLJIT_SIG_LESS);
-/* End of dropping frames. */
+jump = CMP(SLJIT_NOT_ZERO /* SIG_LESS */, TMP2, 0, SLJIT_IMM, 0);
+/* End of reverting values. */
+OP1(SLJIT_MOV, STACK_TOP, 0, TMP3, 0);
sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
JUMPHERE(jump);
OP1(SLJIT_NEG, TMP2, 0, TMP2, 0);
-OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP3, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(TMP1), sizeof(sljit_sw));
-OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 2 * sizeof(sljit_sw));
+OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
+OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -2 * sizeof(sljit_sw));
+OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * sizeof(sljit_sw));
JUMPTO(SLJIT_JUMP, mainloop);
}
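
/* Frame layout walked by the loop above, inferred from its loads and stores
   (treat the exact tagging as an observation, not a specification): the word
   at [STACK_TOP - sizeof(sljit_sw)] tags each record;
     tag > 0   offset from the locals base, the next two saved words are
               copied back to that offset (STACK_TOP drops by three words),
     tag < 0   negated offset, one saved word is copied back (drop two words),
     tag == 0  end of the frame, STACK_TOP is put back from TMP3 and the
               helper fast-returns. */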
@@ -4837,11 +4841,11 @@ if (common->use_ucp)
jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_UNDERSCORE);
add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL));
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Ll);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Nd - ucp_Ll);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
JUMPHERE(jump);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP2, 0);
}
@@ -4881,11 +4885,11 @@ if (common->use_ucp)
jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_UNDERSCORE);
add_jump(compiler, &common->getucd, JUMP(SLJIT_FAST_CALL));
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Ll);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, ucp_Nd - ucp_Ll);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ucp_No - ucp_Nd);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
JUMPHERE(jump);
}
else
@@ -4913,7 +4917,7 @@ else
}
set_jumps(skipread_list, LABEL());
-OP2(SLJIT_XOR | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
+OP2(SLJIT_XOR | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
sljit_emit_fast_return(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
}
@@ -5064,7 +5068,7 @@ switch(length)
return TRUE;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
return FALSE;
}
}
@@ -5077,22 +5081,22 @@ DEFINE_COMPILER;
sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x0a);
-OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a);
-OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
-OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a);
+OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a);
+OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
+OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a);
#if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32
#ifdef COMPILE_PCRE8
if (common->utf)
{
#endif
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x1);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a);
#ifdef COMPILE_PCRE8
}
#endif
#endif /* SUPPORT_UTF || COMPILE_PCRE16 || COMPILE_PCRE32 */
-OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
}
@@ -5103,34 +5107,34 @@ DEFINE_COMPILER;
sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
-OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x09);
-OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
-OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x20);
-OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
-OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xa0);
+OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x09);
+OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
+OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x20);
+OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
+OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xa0);
#if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32
#ifdef COMPILE_PCRE8
if (common->utf)
{
#endif
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x1680);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x1680);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x2000);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x200A - 0x2000);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x202f - 0x2000);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x205f - 0x2000);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x3000 - 0x2000);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x200A - 0x2000);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x202f - 0x2000);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x205f - 0x2000);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x3000 - 0x2000);
#ifdef COMPILE_PCRE8
}
#endif
#endif /* SUPPORT_UTF || COMPILE_PCRE16 || COMPILE_PCRE32 */
-OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
}
@@ -5143,22 +5147,22 @@ DEFINE_COMPILER;
sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x0a);
-OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a);
-OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
-OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a);
+OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x0d - 0x0a);
+OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
+OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x0a);
#if defined SUPPORT_UTF || defined COMPILE_PCRE16 || defined COMPILE_PCRE32
#ifdef COMPILE_PCRE8
if (common->utf)
{
#endif
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
OP2(SLJIT_OR, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x1);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2029 - 0x0a);
#ifdef COMPILE_PCRE8
}
#endif
#endif /* SUPPORT_UTF || COMPILE_PCRE16 || COMPILE_PCRE32 */
-OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
}
@@ -5183,7 +5187,7 @@ label = LABEL();
OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1));
OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0);
-OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
JUMPTO(SLJIT_NOT_ZERO, label);
JUMPHERE(jump);
@@ -5227,7 +5231,7 @@ OP1(SLJIT_MOV_U8, CHAR2, 0, SLJIT_MEM2(LCC_TABLE, CHAR2), 0);
JUMPHERE(jump);
#endif
jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0);
-OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
JUMPTO(SLJIT_NOT_ZERO, label);
JUMPHERE(jump);
@@ -5394,7 +5398,7 @@ do
#endif
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
context->ucharptr = 0;
@@ -5568,7 +5572,7 @@ while (*cc != XCL_END)
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
cc += 2;
@@ -5592,7 +5596,7 @@ if ((cc[-1] & XCL_HASPROP) == 0)
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
add_jump(compiler, &found, JUMP(SLJIT_NOT_ZERO));
}
@@ -5625,7 +5629,7 @@ else if ((cc[-1] & XCL_MAP) != 0)
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
add_jump(compiler, list, JUMP(SLJIT_NOT_ZERO));
#ifdef COMPILE_PCRE8
@@ -5644,6 +5648,15 @@ if (needstype || needsscript)
if (needschar && !charsaved)
OP1(SLJIT_MOV, RETURN_ADDR, 0, TMP1, 0);
+#ifdef COMPILE_PCRE32
+ if (!common->utf)
+ {
+ jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0x10ffff + 1);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR);
+ JUMPHERE(jump);
+ }
+#endif
+
OP2(SLJIT_LSHR, TMP2, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_SHIFT);
OP1(SLJIT_MOV_U8, TMP2, 0, SLJIT_MEM1(TMP2), (sljit_sw)PRIV(ucd_stage1));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, UCD_BLOCK_MASK);
@@ -5735,14 +5748,14 @@ while (*cc != XCL_END)
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
- OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, numberofcmps == 0 ? SLJIT_UNUSED : TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
+ OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
@@ -5761,14 +5774,14 @@ while (*cc != XCL_END)
if (numberofcmps < 3 && (*cc == XCL_SINGLE || *cc == XCL_RANGE))
{
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
- OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, numberofcmps == 0 ? SLJIT_UNUSED : TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
+ OP_FLAGS(numberofcmps == 0 ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
numberofcmps++;
}
else if (numberofcmps > 0)
{
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(c - charoffset));
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
numberofcmps = 0;
}
@@ -5793,12 +5806,12 @@ while (*cc != XCL_END)
break;
case PT_LAMP:
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - typeoffset);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ll - typeoffset);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lt - typeoffset);
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
@@ -5820,33 +5833,33 @@ while (*cc != XCL_END)
case PT_SPACE:
case PT_PXSPACE:
SET_CHAR_OFFSET(9);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd - 0x9);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd - 0x9);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x9);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x85 - 0x9);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x9);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x9);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_TYPE_OFFSET(ucp_Zl);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Zl);
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_WORD:
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset));
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_UNDERSCORE - charoffset));
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
/* Fall through. */
case PT_ALNUM:
SET_TYPE_OFFSET(ucp_Ll);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
- OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, (*cc == PT_ALNUM) ? SLJIT_UNUSED : TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Lu - ucp_Ll);
+ OP_FLAGS((*cc == PT_ALNUM) ? SLJIT_MOV : SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Nd);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_No - ucp_Nd);
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
@@ -5868,8 +5881,8 @@ while (*cc != XCL_END)
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[1]);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[1]);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
other_cases += 2;
}
else if (is_powerof2(other_cases[2] ^ other_cases[1]))
@@ -5881,63 +5894,63 @@ while (*cc != XCL_END)
OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)charoffset);
OP2(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_IMM, other_cases[1] ^ other_cases[0]);
}
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[2]);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP2, 0, SLJIT_IMM, other_cases[2]);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset));
- OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_E : 0), TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(other_cases[0] - charoffset));
+ OP_FLAGS(SLJIT_OR | ((other_cases[3] == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
other_cases += 3;
}
else
{
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
}
while (*other_cases != NOTACHAR)
{
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
- OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? SLJIT_SET_E : 0), TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(*other_cases++ - charoffset));
+ OP_FLAGS(SLJIT_OR | ((*other_cases == NOTACHAR) ? SLJIT_SET_Z : 0), TMP2, 0, SLJIT_EQUAL);
}
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_UCNC:
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset));
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset));
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset));
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_DOLLAR_SIGN - charoffset));
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_COMMERCIAL_AT - charoffset));
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(CHAR_GRAVE_ACCENT - charoffset));
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
SET_CHAR_OFFSET(0xa0);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset));
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (sljit_sw)(0xd7ff - charoffset));
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xe000 - 0);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_GREATER_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_GREATER_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xe000 - 0);
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_GREATER_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
case PT_PXGRAPH:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_GREATER);
+ OP2(SLJIT_SUB | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x180e - 0x2066);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
@@ -5946,21 +5959,21 @@ while (*cc != XCL_END)
case PT_PXPRINT:
/* C and Z groups are the farthest two groups. */
SET_TYPE_OFFSET(ucp_Ll);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_GREATER);
+ OP2(SLJIT_SUB | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Ll);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_GREATER);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll);
- OP_FLAGS(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_NOT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Zs - ucp_Ll);
+ OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_NOT_EQUAL);
jump = CMP(SLJIT_NOT_EQUAL, typereg, 0, SLJIT_IMM, ucp_Cf - ucp_Ll);
/* In case of ucp_Cf, we overwrite the result. */
SET_CHAR_OFFSET(0x2066);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x2069 - 0x2066);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
- OP_FLAGS(SLJIT_OR, TMP2, 0, TMP2, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x061c - 0x2066);
+ OP_FLAGS(SLJIT_OR, TMP2, 0, SLJIT_EQUAL);
JUMPHERE(jump);
jump = CMP(SLJIT_ZERO ^ invertcmp, TMP2, 0, SLJIT_IMM, 0);
@@ -5968,21 +5981,21 @@ while (*cc != XCL_END)
case PT_PXPUNCT:
SET_TYPE_OFFSET(ucp_Sc);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_So - ucp_Sc);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS_EQUAL);
SET_CHAR_OFFSET(0);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x7f);
- OP_FLAGS(SLJIT_AND, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0x7f);
+ OP_FLAGS(SLJIT_AND, TMP2, 0, SLJIT_LESS_EQUAL);
SET_TYPE_OFFSET(ucp_Pc);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_LESS_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS_EQUAL, SLJIT_UNUSED, 0, typereg, 0, SLJIT_IMM, ucp_Ps - ucp_Pc);
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_LESS_EQUAL);
jump = JUMP(SLJIT_NOT_ZERO ^ invertcmp);
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
cc += 2;
@@ -6028,6 +6041,7 @@ switch(type)
case OP_NOT_WORD_BOUNDARY:
case OP_WORD_BOUNDARY:
add_jump(compiler, &common->wordboundary, JUMP(SLJIT_FAST_CALL));
+ sljit_set_current_flags(compiler, SLJIT_SET_Z);
add_jump(compiler, backtracks, JUMP(type == OP_NOT_WORD_BOUNDARY ? SLJIT_NOT_ZERO : SLJIT_ZERO));
return cc;
@@ -6043,10 +6057,10 @@ switch(type)
else
{
jump[1] = CMP(SLJIT_EQUAL, TMP2, 0, STR_END, 0);
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0);
- OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_UNUSED, 0, SLJIT_LESS);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff);
- OP_FLAGS(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_NOT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_LESS, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0);
+ OP_FLAGS(SLJIT_MOV, TMP2, 0, SLJIT_LESS);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, (common->newline >> 8) & 0xff);
+ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_NOT_EQUAL);
add_jump(compiler, backtracks, JUMP(SLJIT_NOT_EQUAL));
check_partial(common, TRUE);
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
@@ -6068,9 +6082,9 @@ switch(type)
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0));
jump[1] = CMP(SLJIT_NOT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_CR);
OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(2));
- OP2(SLJIT_SUB | SLJIT_SET_U, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_GREATER, SLJIT_UNUSED, 0, TMP2, 0, STR_END, 0);
jump[2] = JUMP(SLJIT_GREATER);
- add_jump(compiler, backtracks, JUMP(SLJIT_LESS));
+ add_jump(compiler, backtracks, JUMP(SLJIT_NOT_EQUAL) /* LESS */);
/* Equal. */
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
jump[3] = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, CHAR_NL);
@@ -6089,6 +6103,7 @@ switch(type)
read_char_range(common, common->nlmin, common->nlmax, TRUE);
add_jump(compiler, backtracks, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, STR_END, 0));
add_jump(compiler, &common->anynewline, JUMP(SLJIT_FAST_CALL));
+ sljit_set_current_flags(compiler, SLJIT_SET_Z);
add_jump(compiler, backtracks, JUMP(SLJIT_ZERO));
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
}
@@ -6204,7 +6219,7 @@ switch(type)
label = LABEL();
add_jump(compiler, backtracks, CMP(SLJIT_LESS_EQUAL, STR_PTR, 0, TMP3, 0));
skip_char_back(common);
- OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, TMP2, 0, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
}
else
@@ -6217,7 +6232,7 @@ switch(type)
check_start_used_ptr(common);
return cc + LINK_SIZE;
}
-SLJIT_ASSERT_STOP();
+SLJIT_UNREACHABLE();
return cc;
}
@@ -6250,7 +6265,7 @@ switch(type)
#endif
read_char8_type(common, type == OP_NOT_DIGIT);
/* Flip the starting bit in the negative case. */
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_digit);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_digit);
add_jump(compiler, backtracks, JUMP(type == OP_DIGIT ? SLJIT_ZERO : SLJIT_NOT_ZERO));
return cc;
@@ -6264,7 +6279,7 @@ switch(type)
else
#endif
read_char8_type(common, type == OP_NOT_WHITESPACE);
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_space);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_space);
add_jump(compiler, backtracks, JUMP(type == OP_WHITESPACE ? SLJIT_ZERO : SLJIT_NOT_ZERO));
return cc;
@@ -6278,7 +6293,7 @@ switch(type)
else
#endif
read_char8_type(common, type == OP_NOT_WORDCHAR);
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_word);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, ctype_word);
add_jump(compiler, backtracks, JUMP(type == OP_WORDCHAR ? SLJIT_ZERO : SLJIT_NOT_ZERO));
return cc;
@@ -6320,8 +6335,8 @@ switch(type)
#elif defined COMPILE_PCRE16
jump[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xd800);
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800);
- OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_UNUSED, 0, SLJIT_EQUAL);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, SLJIT_IMM, 0xd800);
+ OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
#endif
@@ -6383,6 +6398,7 @@ switch(type)
detect_partial_match(common, backtracks);
read_char_range(common, 0x9, 0x3000, type == OP_NOT_HSPACE);
add_jump(compiler, &common->hspace, JUMP(SLJIT_FAST_CALL));
+ sljit_set_current_flags(compiler, SLJIT_SET_Z);
add_jump(compiler, backtracks, JUMP(type == OP_NOT_HSPACE ? SLJIT_NOT_ZERO : SLJIT_ZERO));
return cc;
@@ -6392,6 +6408,7 @@ switch(type)
detect_partial_match(common, backtracks);
read_char_range(common, 0xa, 0x2029, type == OP_NOT_VSPACE);
add_jump(compiler, &common->vspace, JUMP(SLJIT_FAST_CALL));
+ sljit_set_current_flags(compiler, SLJIT_SET_Z);
add_jump(compiler, backtracks, JUMP(type == OP_NOT_VSPACE ? SLJIT_NOT_ZERO : SLJIT_ZERO));
return cc;
@@ -6418,7 +6435,7 @@ switch(type)
OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(STACK_TOP), (sljit_sw)PRIV(ucp_gbtable));
OP1(SLJIT_MOV, STACK_TOP, 0, TMP2, 0);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
JUMPTO(SLJIT_NOT_ZERO, label);
OP1(SLJIT_MOV, STR_PTR, 0, TMP3, 0);
@@ -6587,7 +6604,7 @@ switch(type)
OP2(SLJIT_LSHR, TMP1, 0, TMP1, 0, SLJIT_IMM, 3);
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP1), (sljit_sw)cc);
OP2(SLJIT_SHL, TMP2, 0, SLJIT_IMM, 1, TMP2, 0);
- OP2(SLJIT_AND | SLJIT_SET_E, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
+ OP2(SLJIT_AND | SLJIT_SET_Z, SLJIT_UNUSED, 0, TMP1, 0, TMP2, 0);
add_jump(compiler, backtracks, JUMP(SLJIT_ZERO));
#if defined SUPPORT_UTF || !defined COMPILE_PCRE8
@@ -6604,7 +6621,7 @@ switch(type)
return cc + GET(cc, 0) - 1;
#endif
}
-SLJIT_ASSERT_STOP();
+SLJIT_UNREACHABLE();
return cc;
}
@@ -6790,9 +6807,9 @@ else
#endif /* SUPPORT_UTF && SUPPORT_UCP */
{
if (ref)
- OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), TMP1, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), TMP1, 0);
else
- OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0);
if (withchecks)
jump = JUMP(SLJIT_ZERO);
@@ -6883,7 +6900,7 @@ switch(type)
cc += 1 + IMM2_SIZE + 1 + 2 * IMM2_SIZE;
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
@@ -6897,7 +6914,7 @@ if (!minimize)
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(1), SLJIT_IMM, 0);
/* Temporary release of STR_PTR. */
- OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
/* Handles both invalid and empty cases. Since the minimum repeat
is zero, the invalid case is basically the same as an empty case. */
if (ref)
@@ -6910,7 +6927,7 @@ if (!minimize)
zerolength = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
}
/* Restore if not zero length. */
- OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
}
else
{
@@ -7157,7 +7174,7 @@ return (*PUBL(callout))(callout_block);
(((int)sizeof(PUBL(callout_block)) + 7) & ~7)
#define CALLOUT_ARG_OFFSET(arg) \
- (-CALLOUT_ARG_SIZE + SLJIT_OFFSETOF(PUBL(callout_block), arg))
+ SLJIT_OFFSETOF(PUBL(callout_block), arg)
static SLJIT_INLINE pcre_uchar *compile_callout_matchingpath(compiler_common *common, pcre_uchar *cc, backtrack_common *parent)
{
@@ -7187,7 +7204,8 @@ OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), CALLOUT_ARG_OFFSET(mark), (common->mark_pt
/* Needed to save important temporary registers. */
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
-OP2(SLJIT_SUB, SLJIT_R1, 0, STACK_TOP, 0, SLJIT_IMM, CALLOUT_ARG_SIZE);
+/* SLJIT_R0 = arguments */
+OP1(SLJIT_MOV, SLJIT_R1, 0, STACK_TOP, 0);
GET_LOCAL_BASE(SLJIT_R2, 0, OVECTOR_START);
sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout));
OP1(SLJIT_MOV_S32, SLJIT_RETURN_REG, 0, SLJIT_RETURN_REG, 0);
@@ -7195,12 +7213,12 @@ OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
free_stack(common, CALLOUT_ARG_SIZE / sizeof(sljit_sw));
/* Check return value. */
-OP2(SLJIT_SUB | SLJIT_SET_S, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
+OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER));
if (common->forced_quit_label == NULL)
- add_jump(compiler, &common->forced_quit, JUMP(SLJIT_SIG_LESS));
+ add_jump(compiler, &common->forced_quit, JUMP(SLJIT_NOT_EQUAL) /* SIG_LESS */);
else
- JUMPTO(SLJIT_SIG_LESS, common->forced_quit_label);
+ JUMPTO(SLJIT_NOT_EQUAL /* SIG_LESS */, common->forced_quit_label);
return cc + 2 + 2 * LINK_SIZE;
}
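
/* How the single compare above covers all three callout outcomes, going only
   by the jumps emitted here: SLJIT_SET_Z | SLJIT_SET_SIG_GREATER on the SUB
   against 0 lets a positive return value branch to topbacktracks first; once
   that branch is not taken, SLJIT_NOT_EQUAL can only mean a negative value,
   which is routed to forced_quit, while zero falls through and matching
   continues. */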
@@ -7321,7 +7339,7 @@ else
allocate_stack(common, framesize + extrasize);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
- OP2(SLJIT_SUB, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw));
+ OP2(SLJIT_ADD, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw));
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP2, 0);
if (needs_control_head)
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr);
@@ -7392,22 +7410,22 @@ while (1)
free_stack(common, extrasize);
if (needs_control_head)
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), 0);
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(-1));
}
else
{
if ((opcode != OP_ASSERT_NOT && opcode != OP_ASSERTBACK_NOT) || conditional)
{
/* We don't need to keep the STR_PTR, only the previous private_data_ptr. */
- OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw));
if (needs_control_head)
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), 0);
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(-1));
}
else
{
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
if (needs_control_head)
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), (framesize + 1) * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 2));
add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
}
}
@@ -7418,25 +7436,25 @@ while (1)
if (conditional)
{
if (extrasize > 0)
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), needs_control_head ? sizeof(sljit_sw) : 0);
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), needs_control_head ? STACK(-2) : STACK(-1));
}
else if (bra == OP_BRAZERO)
{
if (framesize < 0)
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (extrasize - 1) * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-extrasize));
else
{
- OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), framesize * sizeof(sljit_sw));
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (framesize + extrasize - 1) * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 1));
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-framesize - extrasize));
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, TMP1, 0);
}
- OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0);
}
else if (framesize >= 0)
{
/* For OP_BRA and OP_BRAMINZERO. */
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), framesize * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 1));
}
}
add_jump(compiler, found, JUMP(SLJIT_JUMP));
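
/* A note on the addressing used in this assertion code, inferred from the
   code itself rather than from the sljit headers: frame words that were
   reached with positive offsets from the saved frame base, such as
   (framesize + 1) * sizeof(sljit_sw), are reached here with negative STACK()
   slots counted from the current top, such as STACK(-framesize - 2), which is
   consistent with the backtracking stack growing towards lower addresses. */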
@@ -7480,12 +7498,12 @@ if (common->positive_assert_quit != NULL)
set_jumps(common->positive_assert_quit, LABEL());
SLJIT_ASSERT(framesize != no_stack);
if (framesize < 0)
- OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, extrasize * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, extrasize * sizeof(sljit_sw));
else
{
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
- OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + extrasize) * sizeof(sljit_sw));
}
JUMPHERE(jump);
}
@@ -7534,18 +7552,18 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK)
{
/* We know that STR_PTR was stored on the top of the stack. */
if (extrasize > 0)
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (extrasize - 1) * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-extrasize));
/* Keep the STR_PTR on the top of the stack. */
if (bra == OP_BRAZERO)
{
- OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
if (extrasize == 2)
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), STR_PTR, 0);
}
else if (bra == OP_BRAMINZERO)
{
- OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0);
}
}
@@ -7554,13 +7572,13 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK)
if (bra == OP_BRA)
{
/* We don't need to keep the STR_PTR, only the previous private_data_ptr. */
- OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw));
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), (extrasize - 2) * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 1) * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(-extrasize + 1));
}
else
{
/* We don't need to keep the STR_PTR, only the previous private_data_ptr. */
- OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 2) * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + 2) * sizeof(sljit_sw));
if (extrasize == 2)
{
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0));
@@ -7588,7 +7606,7 @@ if (opcode == OP_ASSERT || opcode == OP_ASSERTBACK)
{
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), framesize * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-framesize - 1));
}
set_jumps(backtrack->common.topbacktracks, LABEL());
}
@@ -7675,23 +7693,23 @@ if (framesize < 0)
}
if (needs_control_head)
- OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), (ket != OP_KET || has_alternatives) ? sizeof(sljit_sw) : 0);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), (ket != OP_KET || has_alternatives) ? STACK(-2) : STACK(-1));
/* TMP2 which is set here used by OP_KETRMAX below. */
if (ket == OP_KETRMAX)
- OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), 0);
+ OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(-1));
else if (ket == OP_KETRMIN)
{
/* Move the STR_PTR to the private_data_ptr. */
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), 0);
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-1));
}
}
else
{
stacksize = (ket != OP_KET || has_alternatives) ? 2 : 1;
- OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + stacksize) * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, (framesize + stacksize) * sizeof(sljit_sw));
if (needs_control_head)
- OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), 0);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-1));
if (ket == OP_KETRMAX)
{
@@ -7927,7 +7945,7 @@ if (bra == OP_BRAMINZERO)
{
/* Except when the whole stack frame must be saved. */
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
- braminzero = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), (BACKTRACK_AS(bracket_backtrack)->u.framesize + 1) * sizeof(sljit_sw));
+ braminzero = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), STACK(-BACKTRACK_AS(bracket_backtrack)->u.framesize - 2));
}
JUMPHERE(skip);
}
@@ -8000,7 +8018,7 @@ if (opcode == OP_ONCE)
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize), STR_PTR, 0);
if (BACKTRACK_AS(bracket_backtrack)->u.framesize == no_frame)
- OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, needs_control_head ? (2 * sizeof(sljit_sw)) : sizeof(sljit_sw));
+ OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, needs_control_head ? (2 * sizeof(sljit_sw)) : sizeof(sljit_sw));
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(stacksize + 1), TMP2, 0);
}
else if (ket == OP_KETRMAX || has_alternatives)
@@ -8018,7 +8036,7 @@ if (opcode == OP_ONCE)
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), TMP2, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
- OP2(SLJIT_SUB, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw));
+ OP2(SLJIT_ADD, TMP2, 0, STACK_TOP, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw));
stacksize = needs_control_head ? 1 : 0;
if (ket != OP_KET || has_alternatives)
@@ -8090,13 +8108,13 @@ if (opcode == OP_COND || opcode == OP_SCOND)
slot = common->name_table + GET2(matchingpath, 1) * common->name_entry_size;
OP1(SLJIT_MOV, TMP3, 0, STR_PTR, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1));
- OP2(SLJIT_SUB | SLJIT_SET_E, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(GET2(slot, 0) << 1), TMP1, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(GET2(slot, 0) << 1), TMP1, 0);
slot += common->name_entry_size;
i--;
while (i-- > 0)
{
OP2(SLJIT_SUB, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(GET2(slot, 0) << 1), TMP1, 0);
- OP2(SLJIT_OR | SLJIT_SET_E, TMP2, 0, TMP2, 0, STR_PTR, 0);
+ OP2(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, TMP2, 0, STR_PTR, 0);
slot += common->name_entry_size;
}
OP1(SLJIT_MOV, STR_PTR, 0, TMP3, 0);
@@ -8111,7 +8129,7 @@ if (opcode == OP_COND || opcode == OP_SCOND)
if (*matchingpath == OP_FAIL)
stacksize = 0;
- if (*matchingpath == OP_RREF)
+ else if (*matchingpath == OP_RREF)
{
stacksize = GET2(matchingpath, 1);
if (common->currententry == NULL)
@@ -8244,7 +8262,7 @@ if (ket == OP_KETRMAX)
{
if (has_alternatives)
BACKTRACK_AS(bracket_backtrack)->alternative_matchingpath = LABEL();
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, rmax_label);
/* Drop STR_PTR for greedy plus quantifier. */
if (opcode != OP_ONCE)
@@ -8274,7 +8292,7 @@ if (ket == OP_KETRMAX)
if (repeat_type == OP_EXACT)
{
count_match(common);
- OP2(SLJIT_SUB | SLJIT_SET_E, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_MEM1(SLJIT_SP), repeat_ptr, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, rmax_label);
}
else if (repeat_type == OP_UPTO)
@@ -8374,7 +8392,7 @@ switch(opcode)
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
@@ -8452,7 +8470,7 @@ else
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
if (needs_control_head)
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr);
- OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, -STACK(stacksize - 1));
+ OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), private_data_ptr, STACK_TOP, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw));
stack = 0;
if (!zero)
@@ -8524,7 +8542,7 @@ while (*cc != OP_KETRPOS)
{
if (offset != 0)
{
- OP2(SLJIT_ADD, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, stacksize * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_IMM, stacksize * sizeof(sljit_sw));
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), cbraprivptr);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1), STR_PTR, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), cbraprivptr, STR_PTR, 0);
@@ -8535,10 +8553,10 @@ while (*cc != OP_KETRPOS)
else
{
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
- OP2(SLJIT_ADD, STACK_TOP, 0, TMP2, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, TMP2, 0, SLJIT_IMM, stacksize * sizeof(sljit_sw));
if (opcode == OP_SBRAPOS)
- OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), (framesize + 1) * sizeof(sljit_sw));
- OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), (framesize + 1) * sizeof(sljit_sw), STR_PTR, 0);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), STACK(-framesize - 2));
+ OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), STACK(-framesize - 2), STR_PTR, 0);
}
/* Even if the match is empty, we need to reset the control head. */
@@ -8584,7 +8602,7 @@ while (*cc != OP_KETRPOS)
else
{
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP2), (framesize + 1) * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP2), STACK(-framesize - 2));
}
}
@@ -8601,7 +8619,7 @@ if (!zero)
if (framesize < 0)
add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(stacksize - 1), SLJIT_IMM, 0));
else /* TMP2 is set to [private_data_ptr] above. */
- add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(TMP2), (stacksize - 1) * sizeof(sljit_sw), SLJIT_IMM, 0));
+ add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(TMP2), STACK(-stacksize), SLJIT_IMM, 0));
}
/* None of them matched. */
@@ -8824,7 +8842,7 @@ if (exact > 1)
OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, exact);
label = LABEL();
compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, FALSE);
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
}
else
@@ -8832,7 +8850,7 @@ if (exact > 1)
OP1(SLJIT_MOV, tmp_base, tmp_offset, SLJIT_IMM, exact);
label = LABEL();
compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, TRUE);
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
}
}
@@ -8862,7 +8880,7 @@ switch(opcode)
if (opcode == OP_UPTO)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0);
- OP2(SLJIT_SUB | SLJIT_SET_E, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
jump = JUMP(SLJIT_ZERO);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE0, TMP1, 0);
}
@@ -8924,7 +8942,7 @@ switch(opcode)
label = LABEL();
if (opcode == OP_UPTO)
{
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_ZERO));
}
compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, FALSE);
@@ -8944,7 +8962,7 @@ switch(opcode)
OP1(SLJIT_MOV, base, offset1, STR_PTR, 0);
if (opcode == OP_UPTO)
{
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
add_jump(compiler, &no_match, JUMP(SLJIT_ZERO));
}
@@ -8971,7 +8989,7 @@ switch(opcode)
if (opcode == OP_UPTO)
{
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
}
else
@@ -9000,7 +9018,7 @@ switch(opcode)
if (opcode == OP_UPTO)
{
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
}
else
@@ -9026,7 +9044,7 @@ switch(opcode)
compile_char1_matchingpath(common, type, cc, &no_char1_match, FALSE);
if (opcode == OP_UPTO)
{
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
}
@@ -9113,7 +9131,7 @@ switch(opcode)
label = LABEL();
compile_char1_matchingpath(common, type, cc, &no_match, TRUE);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1, STR_PTR, 0);
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
set_jumps(no_match, LABEL());
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), POSSESSIVE1);
@@ -9124,7 +9142,7 @@ switch(opcode)
label = LABEL();
detect_partial_match(common, &no_match);
compile_char1_matchingpath(common, type, cc, &no_char1_match, FALSE);
- OP2(SLJIT_SUB | SLJIT_SET_E, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, tmp_base, tmp_offset, tmp_base, tmp_offset, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, label);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
set_jumps(no_char1_match, LABEL());
@@ -9142,7 +9160,7 @@ switch(opcode)
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
@@ -9264,7 +9282,7 @@ size = 3 + (size < 0 ? 0 : size);
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr);
allocate_stack(common, size);
if (size > 3)
- OP2(SLJIT_SUB, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, STACK_TOP, 0, SLJIT_IMM, (size - 3) * sizeof(sljit_sw));
+ OP2(SLJIT_ADD, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, STACK_TOP, 0, SLJIT_IMM, (size - 3) * sizeof(sljit_sw));
else
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, STACK_TOP, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(size - 1), SLJIT_IMM, BACKTRACK_AS(then_trap_backtrack)->start);
@@ -9569,7 +9587,7 @@ while (cc < ccend)
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
return;
}
if (cc == NULL)
@@ -9677,7 +9695,7 @@ switch(opcode)
case OP_MINUPTO:
OP1(SLJIT_MOV, TMP1, 0, base, offset1);
OP1(SLJIT_MOV, STR_PTR, 0, base, offset0);
- OP2(SLJIT_SUB | SLJIT_SET_E, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
add_jump(compiler, &jumplist, JUMP(SLJIT_ZERO));
OP1(SLJIT_MOV, base, offset1, TMP1, 0);
@@ -9723,7 +9741,7 @@ switch(opcode)
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
@@ -9831,7 +9849,7 @@ if (*cc == OP_ASSERT || *cc == OP_ASSERTBACK)
{
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(assert_backtrack)->private_data_ptr);
add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(assert_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), CURRENT_AS(assert_backtrack)->framesize * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(assert_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-CURRENT_AS(assert_backtrack)->framesize - 1));
set_jumps(current->topbacktracks, LABEL());
}
@@ -9841,7 +9859,7 @@ else
if (bra == OP_BRAZERO)
{
/* We know there is enough place on the stack. */
- OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, sizeof(sljit_sw));
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(0), SLJIT_IMM, 0);
JUMPTO(SLJIT_JUMP, CURRENT_AS(assert_backtrack)->matchingpath);
JUMPHERE(brajump);
@@ -9954,7 +9972,7 @@ else if (ket == OP_KETRMIN)
else
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), private_data_ptr);
- CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), (CURRENT_AS(bracket_backtrack)->u.framesize + 1) * sizeof(sljit_sw), CURRENT_AS(bracket_backtrack)->recursive_matchingpath);
+ CMPTO(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_MEM1(TMP1), STACK(-CURRENT_AS(bracket_backtrack)->u.framesize - 2), CURRENT_AS(bracket_backtrack)->recursive_matchingpath);
}
/* Drop STR_PTR for non-greedy plus quantifier. */
if (opcode != OP_ONCE)
@@ -10060,7 +10078,7 @@ if (SLJIT_UNLIKELY(opcode == OP_COND) || SLJIT_UNLIKELY(opcode == OP_SCOND))
{
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr);
add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), assert->framesize * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-assert->framesize - 1));
}
cond = JUMP(SLJIT_JUMP);
set_jumps(CURRENT_AS(bracket_backtrack)->u.assert->condfailed, LABEL());
@@ -10201,7 +10219,7 @@ if (has_alternatives)
{
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr);
add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), assert->framesize * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), assert->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-assert->framesize - 1));
}
JUMPHERE(cond);
}
@@ -10256,7 +10274,7 @@ else if (opcode == OP_ONCE)
JUMPHERE(once);
/* Restore previous private_data_ptr */
if (CURRENT_AS(bracket_backtrack)->u.framesize >= 0)
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), CURRENT_AS(bracket_backtrack)->u.framesize * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-CURRENT_AS(bracket_backtrack)->u.framesize - 1));
else if (ket == OP_KETRMIN)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(1));
@@ -10346,7 +10364,7 @@ if (current->topbacktracks)
free_stack(common, CURRENT_AS(bracketpos_backtrack)->stacksize);
JUMPHERE(jump);
}
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(bracketpos_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), CURRENT_AS(bracketpos_backtrack)->framesize * sizeof(sljit_sw));
+OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), CURRENT_AS(bracketpos_backtrack)->private_data_ptr, SLJIT_MEM1(STACK_TOP), STACK(-CURRENT_AS(bracketpos_backtrack)->framesize - 1));
}
static SLJIT_INLINE void compile_braminzero_backtrackingpath(compiler_common *common, struct backtrack_common *current)
@@ -10392,10 +10410,10 @@ if (opcode == OP_THEN || opcode == OP_THEN_ARG)
jump = JUMP(SLJIT_JUMP);
loop = LABEL();
- OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), -(int)sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), STACK(0));
JUMPHERE(jump);
- CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), -(int)(2 * sizeof(sljit_sw)), TMP1, 0, loop);
- CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), -(int)(3 * sizeof(sljit_sw)), TMP2, 0, loop);
+ CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(1), TMP1, 0, loop);
+ CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(2), TMP2, 0, loop);
add_jump(compiler, &common->then_trap->quit, JUMP(SLJIT_JUMP));
return;
}
@@ -10645,7 +10663,7 @@ while (current)
break;
default:
- SLJIT_ASSERT_STOP();
+ SLJIT_UNREACHABLE();
break;
}
current = current->prev;
@@ -10684,7 +10702,7 @@ sljit_emit_fast_enter(compiler, TMP2, 0);
count_match(common);
allocate_stack(common, private_data_size + framesize + alternativesize);
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), STACK(private_data_size + framesize + alternativesize - 1), TMP2, 0);
-copy_private_data(common, ccbegin, ccend, TRUE, private_data_size + framesize + alternativesize, framesize + alternativesize, needs_control_head);
+copy_private_data(common, ccbegin, ccend, TRUE, framesize + alternativesize, private_data_size + framesize + alternativesize, needs_control_head);
if (needs_control_head)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr, STACK_TOP, 0);
@@ -10737,9 +10755,9 @@ if (common->quit != NULL)
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr);
if (needs_frame)
{
- OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw));
- add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw));
+ add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw));
}
OP1(SLJIT_MOV, TMP3, 0, SLJIT_IMM, 0);
common->quit = NULL;
@@ -10750,32 +10768,32 @@ set_jumps(common->accept, LABEL());
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr);
if (needs_frame)
{
- OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw));
- add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw));
+ add_jump(compiler, &common->revertframes, JUMP(SLJIT_FAST_CALL));
+ OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, (framesize + alternativesize) * sizeof(sljit_sw));
}
OP1(SLJIT_MOV, TMP3, 0, SLJIT_IMM, 1);
JUMPHERE(jump);
if (common->quit != NULL)
set_jumps(common->quit, LABEL());
-copy_private_data(common, ccbegin, ccend, FALSE, private_data_size + framesize + alternativesize, framesize + alternativesize, needs_control_head);
+copy_private_data(common, ccbegin, ccend, FALSE, framesize + alternativesize, private_data_size + framesize + alternativesize, needs_control_head);
free_stack(common, private_data_size + framesize + alternativesize);
if (needs_control_head)
{
- OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), 2 * sizeof(sljit_sw));
- OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), sizeof(sljit_sw));
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), STACK(-3));
+ OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(-2));
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr, TMP1, 0);
OP1(SLJIT_MOV, TMP1, 0, TMP3, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, TMP2, 0);
}
else
{
- OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), sizeof(sljit_sw));
+ OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), STACK(-2));
OP1(SLJIT_MOV, TMP1, 0, TMP3, 0);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->recursive_head_ptr, TMP2, 0);
}
-sljit_emit_fast_return(compiler, SLJIT_MEM1(STACK_TOP), 0);
+sljit_emit_fast_return(compiler, SLJIT_MEM1(STACK_TOP), STACK(-1));
}
#undef COMPILE_BACKTRACKINGPATH
@@ -11237,7 +11255,7 @@ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP2, 0);
OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top), STACK_TOP, 0);
-OP2(SLJIT_ADD, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit), SLJIT_IMM, STACK_GROWTH_RATE);
+OP2(SLJIT_SUB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit), SLJIT_IMM, STACK_GROWTH_RATE);
sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize));
jump = CMP(SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
@@ -11391,10 +11409,10 @@ union {
sljit_u8 local_space[MACHINE_STACK_SIZE];
struct sljit_stack local_stack;
-local_stack.top = (sljit_sw)&local_space;
-local_stack.base = local_stack.top;
-local_stack.limit = local_stack.base + MACHINE_STACK_SIZE;
-local_stack.max_limit = local_stack.limit;
+local_stack.max_limit = local_space;
+local_stack.limit = local_space;
+local_stack.base = local_space + MACHINE_STACK_SIZE;
+local_stack.top = local_space + MACHINE_STACK_SIZE;
arguments->stack = &local_stack;
convert_executable_func.executable_func = executable_func;
return convert_executable_func.call_executable_func(arguments);
diff --git a/erts/emulator/pcre/pcre_tables.c b/erts/emulator/pcre/pcre_tables.c
index 2f6302e2e1..08e31f1460 100644
--- a/erts/emulator/pcre/pcre_tables.c
+++ b/erts/emulator/pcre/pcre_tables.c
@@ -6,7 +6,7 @@
and semantics are as close as possible to those of the Perl 5 language.
Written by Philip Hazel
- Copyright (c) 1997-2012 University of Cambridge
+ Copyright (c) 1997-2017 University of Cambridge
-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -162,7 +162,7 @@ const pcre_uint32 PRIV(ucp_gbtable[]) = {
(1<<ucp_gbExtend)|(1<<ucp_gbSpacingMark), /* 5 SpacingMark */
(1<<ucp_gbExtend)|(1<<ucp_gbSpacingMark)|(1<<ucp_gbL)| /* 6 L */
- (1<<ucp_gbL)|(1<<ucp_gbV)|(1<<ucp_gbLV)|(1<<ucp_gbLVT),
+ (1<<ucp_gbV)|(1<<ucp_gbLV)|(1<<ucp_gbLVT),
(1<<ucp_gbExtend)|(1<<ucp_gbSpacingMark)|(1<<ucp_gbV)| /* 7 V */
(1<<ucp_gbT),
diff --git a/erts/emulator/pcre/pcre_ucd.c b/erts/emulator/pcre/pcre_ucd.c
index 9b700c0785..2dd4b05751 100644
--- a/erts/emulator/pcre/pcre_ucd.c
+++ b/erts/emulator/pcre/pcre_ucd.c
@@ -38,6 +38,20 @@ const pcre_uint16 PRIV(ucd_stage2)[] = {0};
const pcre_uint32 PRIV(ucd_caseless_sets)[] = {0};
#else
+/* If the 32-bit library is run in non-32-bit mode, character values
+greater than 0x10ffff may be encountered. For these we set up a
+special record. */
+
+#ifdef COMPILE_PCRE32
+const ucd_record PRIV(dummy_ucd_record)[] = {{
+ ucp_Common, /* script */
+ ucp_Cn, /* type unassigned */
+ ucp_gbOther, /* grapheme break property */
+ 0, /* case set */
+ 0, /* other case */
+ }};
+#endif
+
/* When recompiling tables with a new Unicode version, please check the
types in this structure definition from pcre_internal.h (the actual
field names will be different):
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index ad580e7d52..799f67fc45 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -2564,24 +2564,22 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
#ifdef ERTS_SMP
init_removed_fd_alloc();
pollset.removed_list = NULL;
- erts_smp_spinlock_init(&pollset.removed_list_lock,
- "pollset_rm_list");
+ erts_smp_spinlock_init(&pollset.removed_list_lock, "pollset_rm_list", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
{
- int i;
- for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_mtx_init_x(&drv_ev_state_locks[i].lck, "drv_ev_state", make_small(i));
-#else
- erts_smp_mtx_init(&drv_ev_state_locks[i].lck, "drv_ev_state");
-#endif
- }
+ int i;
+ for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) {
+ erts_smp_mtx_init(&drv_ev_state_locks[i].lck, "drv_ev_state", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
}
#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
max_fds = ERTS_CIO_POLL_MAX_FDS();
erts_smp_atomic_init_nob(&drv_ev_state_len, 0);
drv_ev_state = NULL;
- erts_smp_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow");
+ erts_smp_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
#else
{
SafeHashFunctions hf;
@@ -2591,10 +2589,11 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
hf.free = &drv_ev_state_free;
num_state_prealloc = 0;
state_prealloc_first = NULL;
- erts_smp_spinlock_init(&state_prealloc_lock,"state_prealloc");
+ erts_smp_spinlock_init(&state_prealloc_lock,"state_prealloc", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
- safe_hash_init(ERTS_ALC_T_DRV_EV_STATE, &drv_ev_state_tab, "drv_ev_state_tab",
- DRV_EV_STATE_HTAB_SIZE, hf);
+ safe_hash_init(ERTS_ALC_T_DRV_EV_STATE, &drv_ev_state_tab, "drv_ev_state_tab",
+ ERTS_LOCK_FLAGS_CATEGORY_IO, DRV_EV_STATE_HTAB_SIZE, hf);
}
#endif
}
@@ -3130,3 +3129,12 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip)
return counters.num_errors;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void ERTS_CIO_EXPORT(erts_lcnt_update_cio_locks)(int enable) {
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ erts_lcnt_enable_hash_lock_count(&drv_ev_state_tab, ERTS_LOCK_FLAGS_CATEGORY_IO, enable);
+#else
+ (void)enable;
+#endif
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index ee4abeece9..2d3bb98afa 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -59,6 +59,11 @@ void erts_init_check_io_nkp(void);
int erts_check_io_debug_kp(ErtsCheckIoDebugInfo *);
int erts_check_io_debug_nkp(ErtsCheckIoDebugInfo *);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_cio_locks_kp(int enable);
+void erts_lcnt_update_cio_locks_nkp(int enable);
+#endif
+
#else /* !ERTS_ENABLE_KERNEL_POLL */
Uint erts_check_io_size(void);
@@ -72,6 +77,10 @@ void erts_check_io_interrupt_timed(int, ErtsMonotonicTime);
void erts_check_io(int);
void erts_init_check_io(void);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_cio_locks(int enable);
+#endif
+
#endif
extern erts_smp_atomic_t erts_check_io_time;
diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c
index bb930ff03b..214ed01c82 100644
--- a/erts/emulator/sys/common/erl_mmap.c
+++ b/erts/emulator/sys/common/erl_mmap.c
@@ -2212,9 +2212,11 @@ erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init, int executable)
erts_exit(1, "erts_mmap: Failed to open /dev/zero\n");
#endif
- erts_smp_mtx_init(&mm->mtx, "erts_mmap");
+ erts_smp_mtx_init(&mm->mtx, "erts_mmap", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
if (is_first_call) {
- erts_mtx_init(&am.init_mutex, "mmap_init_atoms");
+ erts_mtx_init(&am.init_mutex, "mmap_init_atoms", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION
@@ -2816,7 +2818,8 @@ static void hard_dbg_mseg_init(void)
{
ErtsFreeSegDesc_fake* p;
- erts_mtx_init(&hard_dbg_mseg_mtx, "hard_dbg_mseg");
+ erts_mtx_init(&hard_dbg_mseg_mtx, "hard_dbg_mseg", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
hard_dbg_mseg_tree.root = NULL;
hard_dbg_mseg_tree.order = ADDR_ORDER;
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index b8f0bb7150..d69a79dc2a 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -1420,7 +1420,8 @@ erts_mseg_init(ErtsMsegInit_t *init)
atoms_initialized = 0;
- erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
+ erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
#ifdef ERTS_HAVE_EXEC_MMAPPER
/* Initialize erts_exec_mapper *FIRST*, to increase probability
@@ -1449,7 +1450,8 @@ erts_mseg_init(ErtsMsegInit_t *init)
ma->is_thread_safe = 0;
else {
ma->is_thread_safe = 1;
- erts_mtx_init(&ma->mtx, "mseg");
+ erts_mtx_init(&ma->mtx, "mseg", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
}
ma->is_cache_check_scheduled = 0;
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 5e7ae8953a..52a8b6a53f 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -336,7 +336,7 @@ static void fatal_error_async_signal_safe(char *error_str);
static int max_fds = -1;
static ErtsPollSet pollsets;
-static erts_smp_spinlock_t pollsets_lock;
+static erts_smp_mtx_t pollsets_lock;
#if ERTS_POLL_USE_POLL
@@ -2583,7 +2583,8 @@ ERTS_POLL_EXPORT(erts_poll_max_fds)(void)
void
ERTS_POLL_EXPORT(erts_poll_init)(void)
{
- erts_smp_spinlock_init(&pollsets_lock, "pollsets_lock");
+ erts_smp_mtx_init(&pollsets_lock, "pollsets_lock", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
pollsets = NULL;
errno = 0;
@@ -2689,7 +2690,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#endif
#ifdef ERTS_SMP
erts_atomic32_init_nob(&ps->polled, 0);
- erts_smp_mtx_init(&ps->mtx, "pollset");
+ erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0);
@@ -2731,10 +2732,10 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#endif
erts_smp_atomic_set_nob(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
- erts_smp_spin_lock(&pollsets_lock);
+ erts_smp_mtx_lock(&pollsets_lock);
ps->next = pollsets;
pollsets = ps;
- erts_smp_spin_unlock(&pollsets_lock);
+ erts_smp_mtx_unlock(&pollsets_lock);
return ps;
}
@@ -2795,7 +2796,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
close(ps->timer_fd);
#endif
- erts_smp_spin_lock(&pollsets_lock);
+ erts_smp_mtx_lock(&pollsets_lock);
if (ps == pollsets)
pollsets = pollsets->next;
else {
@@ -2805,7 +2806,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
ASSERT(ps == prev_ps->next);
prev_ps->next = ps->next;
}
- erts_smp_spin_unlock(&pollsets_lock);
+ erts_smp_mtx_unlock(&pollsets_lock);
erts_free(ERTS_ALC_T_POLLSET, (void *) ps);
}
@@ -3148,3 +3149,26 @@ print_misc_debug_info(void)
}
#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+static void erts_lcnt_enable_pollset_lock_count(ErtsPollSet pollset, int enable) {
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&pollset->mtx.lcnt, "pollset_rm", NIL,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_uninstall(&pollset->mtx.lcnt);
+ }
+}
+
+void ERTS_POLL_EXPORT(erts_lcnt_update_pollset_locks)(int enable) {
+ ErtsPollSet iterator;
+
+ erts_smp_mtx_lock(&pollsets_lock);
+
+ for(iterator = pollsets; iterator != NULL; iterator = iterator->next) {
+ erts_lcnt_enable_pollset_lock_count(iterator, enable);
+ }
+
+ erts_smp_mtx_unlock(&pollsets_lock);
+}
+#endif
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index c16122610d..b3b4d79984 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -260,4 +260,8 @@ void ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet,
int erts_poll_new_table_len(int old_len, int need_len);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void ERTS_POLL_EXPORT(erts_lcnt_update_pollset_locks)(int enable);
+#endif
+
#endif /* #ifndef ERL_POLL_H__ */
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index 22059d21d5..b83837a7d2 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -86,6 +86,10 @@
#include <sys/times.h>
+#ifdef HAVE_SYS_RESOURCE_H
+# include <sys/resource.h>
+#endif
+
#ifdef HAVE_IEEEFP_H
#include <ieeefp.h>
#endif
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 5cf0a49972..50d8a35217 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -438,14 +438,18 @@ erts_sys_pre_init(void)
/* After creation in parent */
eid.thread_create_parent_func = thr_create_cleanup,
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_pre_thr_init();
+#endif
+
erts_thr_init(&eid);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init();
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_thr_init();
#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init();
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init();
#endif
#endif /* USE_THREADS */
@@ -1545,7 +1549,8 @@ erl_sys_args(int* argc, char** argv)
{
int i, j;
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
i = 1;
diff --git a/erts/emulator/sys/unix/sys_time.c b/erts/emulator/sys/unix/sys_time.c
index 4f26639703..102ef7bebf 100644
--- a/erts/emulator/sys/unix/sys_time.c
+++ b/erts/emulator/sys/unix/sys_time.c
@@ -304,8 +304,8 @@ sys_init_time(ErtsSysInitTimeResult *init_resp)
erts_sys_time_data__.r.o.os_times =
clock_gettime_times_verified;
#endif
- erts_smp_mtx_init(&internal_state.w.f.mtx,
- "os_monotonic_time");
+ erts_smp_mtx_init(&internal_state.w.f.mtx, "os_monotonic_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
internal_state.w.f.last_delivered
= clock_gettime_monotonic();
init_resp->os_monotonic_time_info.locked_use = 1;
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index b10fc1e430..8743f83a50 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -142,7 +142,8 @@ static erts_mtx_t save_ops_mtx;
static void poll_debug_init(void)
{
- erts_mtx_init(&save_ops_mtx, "save_ops_lock");
+ erts_mtx_init(&save_ops_mtx, "save_ops_lock", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
}
void poll_debug_set_active_fd(ErtsSysFdType fd)
@@ -677,7 +678,7 @@ static void new_waiter(ErtsPollSet ps)
w->active_events = 1;
w->highwater = 1;
w->total_events = 1;
- erts_mtx_init(&w->mtx, "pollwaiter");
+ erts_mtx_init(&w->mtx, "pollwaiter", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
/*
@@ -1359,7 +1360,7 @@ ErtsPollSet erts_poll_create_pollset(void)
erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#ifdef ERTS_SMP
- erts_smp_mtx_init(&ps->mtx, "pollset");
+ erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
init_timeout_time(ps);
@@ -1411,7 +1412,8 @@ void erts_poll_init(void)
HARDTRACEF(("In erts_poll_init"));
erts_sys_break_event = CreateManualEvent(FALSE);
- erts_mtx_init(&break_waiter_lock,"break_waiter_lock");
+ erts_mtx_init(&break_waiter_lock, "break_waiter_lock", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
break_happened_event = CreateManualEvent(FALSE);
erts_atomic32_init_nob(&break_waiter_state, 0);
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 28019e306c..15c59109b1 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -3195,15 +3195,22 @@ erts_sys_pre_init(void)
/* After creation in parent */
eid.thread_create_parent_func = thr_create_cleanup;
- erts_thr_init(&eid);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init();
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_pre_thr_init();
#endif
+
+ erts_thr_init(&eid);
+
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init();
+ erts_lcnt_post_thr_init();
#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init();
#endif
+#endif /* USE_THREADS */
+
erts_init_sys_time_sup();
erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c
index 21ef71ad9a..8fcee1cbb6 100644
--- a/erts/emulator/sys/win32/sys_env.c
+++ b/erts/emulator/sys/win32/sys_env.c
@@ -37,7 +37,8 @@ static erts_smp_rwmtx_t environ_rwmtx;
void
erts_sys_env_init(void)
{
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
int
diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c
index e8c67b3928..88131aaa6a 100644
--- a/erts/emulator/sys/win32/sys_time.c
+++ b/erts/emulator/sys/win32/sys_time.c
@@ -300,8 +300,8 @@ sys_init_time(ErtsSysInitTimeResult *init_resp)
module = GetModuleHandle(kernel_dll_name);
if (!module) {
get_tick_count:
- erts_smp_mtx_init(&internal_state.w.f.mtime_mtx,
- "os_monotonic_time");
+ erts_smp_mtx_init(&internal_state.w.f.mtime_mtx, "os_monotonic_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
internal_state.w.f.wrap = 0;
internal_state.w.f.last_tick_count = 0;
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index fcd7244ae9..b17170c8b8 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -71,8 +71,10 @@ MODULES= \
hash_SUITE \
hibernate_SUITE \
hipe_SUITE \
+ iovec_SUITE \
list_bif_SUITE \
lttng_SUITE \
+ lcnt_SUITE \
map_SUITE \
match_spec_SUITE \
module_info_SUITE \
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index 339c827602..04b7f2de15 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -24,7 +24,7 @@
-include_lib("kernel/include/file.hrl").
-export([all/0, suite/0,
- display/1, display_huge/0,
+ display/1, display_huge/0, display_string/1,
erl_bif_types/1,guard_bifs_in_erl_bif_types/1,
shadow_comments/1,list_to_utf8_atom/1,
specs/1,improper_bif_stubs/1,auto_imports/1,
@@ -43,7 +43,7 @@ all() ->
[erl_bif_types, guard_bifs_in_erl_bif_types, shadow_comments,
specs, improper_bif_stubs, auto_imports,
t_list_to_existing_atom, os_env, otp_7526,
- display, list_to_utf8_atom,
+ display, display_string, list_to_utf8_atom,
atom_to_binary, binary_to_atom, binary_to_existing_atom,
erl_crash_dump_bytes, min_max, erlang_halt, is_builtin,
error_stacktrace, error_stacktrace_during_call_trace].
@@ -68,6 +68,28 @@ deeep(N,Acc) ->
deeep(N) ->
deeep(N,[hello]).
+display_string(Config) when is_list(Config) ->
+ true = erlang:display_string("hej"),
+ true = erlang:display_string(""),
+ true = erlang:display_string("hopp"),
+ true = erlang:display_string("\n"),
+ true = erlang:display_string(lists:seq(1100,1200)),
+ {error,badarg} = try
+ erlang:display_string(atom),
+ ok
+ catch
+ T0:E0 ->
+ {T0, E0}
+ end,
+ {error,badarg} = try
+ erlang:display_string(make_ref()),
+ ok
+ catch
+ T1:E1 ->
+ {T1, E1}
+ end,
+ ok.
+
erl_bif_types(Config) when is_list(Config) ->
ensure_erl_bif_types_compiled(),
@@ -503,6 +525,8 @@ binary_to_atom(Config) when is_list(Config) ->
?BADARG(binary_to_atom(id(<<255>>), utf8)),
?BADARG(binary_to_atom(id(<<255,0>>), utf8)),
?BADARG(binary_to_atom(id(<<16#C0,16#80>>), utf8)), %Overlong 0.
+ <<B:1/binary, _/binary>> = id(<<194, 163>>), %Truncated character ERL-474
+ ?BADARG(binary_to_atom(B, utf8)),
%% system_limit failures.
?SYS_LIMIT(binary_to_atom(id(<<0:512/unit:8,255>>), utf8)),
@@ -691,6 +715,9 @@ erlang_halt(Config) when is_list(Config) ->
{badrpc,nodedown} = rpc:call(N3, erlang, halt, [0,[]]),
{ok,N4} = slave:start(H, halt_node4),
{badrpc,nodedown} = rpc:call(N4, erlang, halt, [lists:duplicate(300,$x)]),
+ %% Test unicode slogan
+ {ok,N4} = slave:start(H, halt_node4),
+ {badrpc,nodedown} = rpc:call(N4, erlang, halt, [[339,338,254,230,198,295,167,223,32,12507,12531,12480]]),
% This test triggers a segfault when dumping a crash dump
% to make sure that we can handle it properly.
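
The display_string/1 cases above pin down a small contract: flat character lists (including "" and code points above 255) are accepted and the BIF returns true, while non-list arguments such as atoms or references raise badarg; the string itself is written directly by the runtime system rather than through the normal io protocol. A hedged sketch of that contract, not part of the suite:

    %% Illustrative only; mirrors the assertions in display_string/1 above.
    true = erlang:display_string("hello world\n"),              %% printed by the runtime itself
    {'EXIT', {badarg, _}} = (catch erlang:display_string(not_a_list)).
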
diff --git a/erts/emulator/test/big_SUITE.erl b/erts/emulator/test/big_SUITE.erl
index 402751393a..c308760211 100644
--- a/erts/emulator/test/big_SUITE.erl
+++ b/erts/emulator/test/big_SUITE.erl
@@ -24,6 +24,7 @@
-export([t_div/1, eq_28/1, eq_32/1, eq_big/1, eq_math/1, big_literals/1,
borders/1, negative/1, big_float_1/1, big_float_2/1,
+ bxor_2pow/1,
shift_limit_1/1, powmod/1, system_limit/1, toobig/1, otp_6692/1]).
%% Internal exports.
@@ -42,6 +43,7 @@ suite() ->
all() ->
[t_div, eq_28, eq_32, eq_big, eq_math, big_literals,
borders, negative, {group, big_float}, shift_limit_1,
+ bxor_2pow,
powmod, system_limit, toobig, otp_6692].
groups() ->
@@ -396,3 +398,54 @@ loop2(X,Y,N,M) ->
end,
loop2(X,Y,N+1,M).
+
+%% ERL-450
+bxor_2pow(_Config) ->
+ IL = lists:seq(8*3, 8*16, 4),
+ JL = lists:seq(0, 64),
+ [bxor_2pow_1((1 bsl I), (1 bsl J))
+ || I <- IL, J <- JL],
+ ok.
+
+bxor_2pow_1(A, B) ->
+ for(-1,1, fun(Ad) ->
+ for(-1,1, fun(Bd) ->
+ bxor_2pow_2(A+Ad, B+Bd),
+ bxor_2pow_2(-A+Ad, B+Bd),
+ bxor_2pow_2(A+Ad, -B+Bd),
+ bxor_2pow_2(-A+Ad, -B+Bd)
+ end)
+ end).
+
+for(From, To, _Fun) when From > To ->
+ ok;
+for(From, To, Fun) ->
+ Fun(From),
+ for(From+1, To, Fun).
+
+bxor_2pow_2(A, B) ->
+ Correct = my_bxor(A, B),
+ case A bxor B of
+ Correct -> ok;
+ Wrong ->
+ io:format("~.16b bxor ~.16b\n", [A,B]),
+ io:format("Expected ~.16b\n", [Correct]),
+ io:format("Got ~.16b\n", [Wrong]),
+ ct:fail({failed, 'bxor'})
+
+ end.
+
+%% Implement bxor without bxor
+my_bxor(A, B) ->
+ my_bxor(A, B, 0, 0).
+
+my_bxor(0, 0, _, Acc) -> Acc;
+my_bxor(-1, -1, _, Acc) -> Acc;
+my_bxor(-1, 0, N, Acc) -> (-1 bsl N) bor Acc; % sign extension
+my_bxor(0, -1, N, Acc) -> (-1 bsl N) bor Acc; % sign extension
+my_bxor(A, B, N, Acc0) ->
+ Acc1 = case (A band 1) =:= (B band 1) of
+ true -> Acc0;
+ false -> Acc0 bor (1 bsl N)
+ end,
+ my_bxor(A bsr 1, B bsr 1, N+1, Acc1).
diff --git a/erts/emulator/test/big_SUITE_data/borders.dat b/erts/emulator/test/big_SUITE_data/borders.dat
index 52e4f35861..c38ff93383 100644
--- a/erts/emulator/test/big_SUITE_data/borders.dat
+++ b/erts/emulator/test/big_SUITE_data/borders.dat
@@ -1114,3 +1114,38 @@
1 = 16#800000000000001 rem (-16#800000000000000).
0 = 16#FFFFFFFFFFFFFFF800000000 rem 16#FFFFFFFFFFFFFFF80.
+% ERL-450 bxor of big negative 2-pow
+-(1 bsl 8) bxor -1 = 16#ff.
+-(1 bsl 16) bxor -1 = 16#ffff.
+-(1 bsl 24) bxor -1 = 16#ffffff.
+-(1 bsl 32) bxor -1 = 16#ffffffff.
+-(1 bsl 40) bxor -1 = 16#ffffffffff.
+-(1 bsl 48) bxor -1 = 16#ffffffffffff.
+-(1 bsl 56) bxor -1 = 16#ffffffffffffff.
+-(1 bsl 64) bxor -1 = 16#ffffffffffffffff.
+-(1 bsl 72) bxor -1 = 16#ffffffffffffffffff.
+-(1 bsl 80) bxor -1 = 16#ffffffffffffffffffff.
+-(1 bsl 88) bxor -1 = 16#ffffffffffffffffffffff.
+-(1 bsl 96) bxor -1 = 16#ffffffffffffffffffffffff.
+-(1 bsl 104) bxor -1 = 16#ffffffffffffffffffffffffff.
+-(1 bsl 112) bxor -1 = 16#ffffffffffffffffffffffffffff.
+-(1 bsl 120) bxor -1 = 16#ffffffffffffffffffffffffffffff.
+-(1 bsl 128) bxor -1 = 16#ffffffffffffffffffffffffffffffff.
+-(1 bsl 136) bxor -1 = 16#ffffffffffffffffffffffffffffffffff.
+-(1 bsl 8) bxor 1 = -16#ff.
+-(1 bsl 16) bxor 1 = -16#ffff.
+-(1 bsl 24) bxor 1 = -16#ffffff.
+-(1 bsl 32) bxor 1 = -16#ffffffff.
+-(1 bsl 40) bxor 1 = -16#ffffffffff.
+-(1 bsl 48) bxor 1 = -16#ffffffffffff.
+-(1 bsl 56) bxor 1 = -16#ffffffffffffff.
+-(1 bsl 64) bxor 1 = -16#ffffffffffffffff.
+-(1 bsl 72) bxor 1 = -16#ffffffffffffffffff.
+-(1 bsl 80) bxor 1 = -16#ffffffffffffffffffff.
+-(1 bsl 88) bxor 1 = -16#ffffffffffffffffffffff.
+-(1 bsl 96) bxor 1 = -16#ffffffffffffffffffffffff.
+-(1 bsl 104) bxor 1 = -16#ffffffffffffffffffffffffff.
+-(1 bsl 112) bxor 1 = -16#ffffffffffffffffffffffffffff.
+-(1 bsl 120) bxor 1 = -16#ffffffffffffffffffffffffffffff.
+-(1 bsl 128) bxor 1 = -16#ffffffffffffffffffffffffffffffff.
+-(1 bsl 136) bxor 1 = -16#ffffffffffffffffffffffffffffffffff.
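
All of the ERL-450 borders above follow from one two's-complement identity: -(1 bsl N) is an infinite run of ones followed by N zeros, so xor with -1 flips it into N ones, i.e. (1 bsl N) - 1, while xor with 1 only sets the lowest bit, giving -((1 bsl N) - 1). A quick self-check of that identity (hypothetical helper, not part of the suite):

    %% Illustrative check of the ERL-450 identity for any N >= 1.
    bxor_2pow_identity(N) when is_integer(N), N >= 1 ->
        true = (-(1 bsl N) bxor -1) =:= (1 bsl N) - 1,
        true = (-(1 bsl N) bxor  1) =:= -((1 bsl N) - 1),
        ok.
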
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index 4d17276e5c..61536bacd7 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -599,6 +599,9 @@ bad_binary_to_term(Config) when is_list(Config) ->
%% Bad float.
bad_bin_to_term(<<131,70,-1:64>>),
+
+ %% Truncated UTF8 character (ERL-474)
+ bad_bin_to_term(<<131,119,1,194,163>>),
ok.
bad_bin_to_term(BadBin) ->
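
For reference, the rejected term above decodes as: 131 is the external term format version byte, 119 the SMALL_ATOM_UTF8_EXT tag (tag name assumed from the external term format), and 1 the declared length, so only the byte 194 is taken as the atom name; that is the first half of the two-byte UTF-8 sequence 194,163, i.e. a truncated character. A sketch of the expected failure:

    %% ERL-474 sketch: declared length 1, but 194 alone is an incomplete
    %% UTF-8 sequence, so decoding must raise badarg.
    {'EXIT', {badarg, _}} = (catch binary_to_term(<<131,119,1,194,163>>)).
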
diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl
index b79f4b995d..ce50bcdd86 100644
--- a/erts/emulator/test/bs_construct_SUITE.erl
+++ b/erts/emulator/test/bs_construct_SUITE.erl
@@ -905,14 +905,28 @@ bs_add_overflow(_Config) ->
_ when Memsize < (2 bsl 30) ->
{skip, "Less then 2 GB of memory"};
4 ->
- Large = <<0:((1 bsl 30)-1)>>,
- {'EXIT',{system_limit,_}} =
- (catch <<Large/bits, Large/bits, Large/bits, Large/bits,
- Large/bits, Large/bits, Large/bits, Large/bits,
- Large/bits>>),
+ {'EXIT', {system_limit, _}} = (catch bs_add_overflow_signed()),
+ {'EXIT', {system_limit, _}} = (catch bs_add_overflow_unsigned()),
ok
end.
+bs_add_overflow_signed() ->
+ %% Produce a large result of bs_add that, if cast to signed int, would
+ %% overflow into a negative number that fits a smallnum.
+ Large = <<0:((1 bsl 30)-1)>>,
+ <<Large/bits, Large/bits, Large/bits, Large/bits,
+ Large/bits, Large/bits, Large/bits, Large/bits,
+ Large/bits>>.
+
+bs_add_overflow_unsigned() ->
+ %% Produce a large result of bs_add that goes beyond the limit of an
+ %% unsigned word. This used to succeed but produced an incorrect result
+ %% where B =:= C!
+ A = <<0:((1 bsl 32)-8)>>,
+ B = <<2, 3>>,
+ C = <<A/binary,1,B/binary>>,
+ true = byte_size(B) < byte_size(C).
+
id(I) -> I.
memsize() ->
diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl
index e4640909aa..7d29ebec52 100644
--- a/erts/emulator/test/fun_SUITE.erl
+++ b/erts/emulator/test/fun_SUITE.erl
@@ -22,6 +22,7 @@
-export([all/0, suite/0,
bad_apply/1,bad_fun_call/1,badarity/1,ext_badarity/1,
+ bad_arglist/1,
equality/1,ordering/1,
fun_to_port/1,t_phash/1,t_phash2/1,md5/1,
refc/1,refc_ets/1,refc_dist/1,
@@ -39,6 +40,7 @@ suite() ->
all() ->
[bad_apply, bad_fun_call, badarity, ext_badarity,
+ bad_arglist,
equality, ordering, fun_to_port, t_phash,
t_phash2, md5, refc, refc_ets, refc_dist,
const_propagation, t_arity, t_is_function2, t_fun_info,
@@ -107,6 +109,18 @@ bad_call_fc(Fun) ->
ct:fail({bad_result,Other})
end.
+%% Test erlang:apply with a non-proper argument list
+bad_arglist(Config) when is_list(Config) ->
+ Fun = fun(A,B) -> A+B end,
+ {'EXIT', {badarg,_}} = (catch apply(Fun, 17)),
+ {'EXIT', {badarg,_}} = (catch apply(Fun, [17|18])),
+ {'EXIT', {badarg,_}} = (catch apply(Fun, [17,18|19])),
+ {'EXIT', {badarg,_}} = (catch apply(lists,seq, 17)),
+ {'EXIT', {badarg,_}} = (catch apply(lists,seq, [17|18])),
+ {'EXIT', {badarg,_}} = (catch apply(lists,seq, [17,18|19])),
+ ok.
+
+
%% Call and apply valid funs with wrong number of arguments.
badarity(Config) when is_list(Config) ->
diff --git a/erts/emulator/test/iovec_SUITE.erl b/erts/emulator/test/iovec_SUITE.erl
new file mode 100644
index 0000000000..a5f605bfff
--- /dev/null
+++ b/erts/emulator/test/iovec_SUITE.erl
@@ -0,0 +1,168 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(iovec_SUITE).
+
+-export([all/0, suite/0]).
+
+-export([integer_lists/1, binary_lists/1, empty_lists/1, empty_binary_lists/1,
+ mixed_lists/1, improper_lists/1, illegal_lists/1, cons_bomb/1,
+ iolist_to_iovec_idempotence/1, iolist_to_iovec_correctness/1]).
+
+-include_lib("common_test/include/ct.hrl").
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap, {minutes, 2}}].
+
+all() ->
+ [integer_lists, binary_lists, empty_lists, empty_binary_lists, mixed_lists,
+ illegal_lists, improper_lists, cons_bomb, iolist_to_iovec_idempotence,
+ iolist_to_iovec_correctness].
+
+integer_lists(Config) when is_list(Config) ->
+ Variations = gen_variations([I || I <- lists:seq(1, 255)]),
+
+ equivalence_test(fun erlang:iolist_to_iovec/1, Variations),
+
+ ok.
+
+binary_lists(Config) when is_list(Config) ->
+ Variations = gen_variations([<<I:8>> || I <- lists:seq(1, 255)]),
+ equivalence_test(fun erlang:iolist_to_iovec/1, Variations),
+ ok.
+
+empty_lists(Config) when is_list(Config) ->
+ Variations = gen_variations([[] || _ <- lists:seq(1, 256)]),
+ equivalence_test(fun erlang:iolist_to_iovec/1, Variations),
+ [] = erlang:iolist_to_iovec([]),
+ ok.
+
+empty_binary_lists(Config) when is_list(Config) ->
+ Variations = gen_variations([<<>> || _ <- lists:seq(1, 8192)]),
+ equivalence_test(fun erlang:iolist_to_iovec/1, Variations),
+ [] = erlang:iolist_to_iovec(Variations),
+ ok.
+
+mixed_lists(Config) when is_list(Config) ->
+ Variations = gen_variations([<<>>, lists:seq(1, 40), <<12, 45, 78>>]),
+ equivalence_test(fun erlang:iolist_to_iovec/1, Variations),
+ ok.
+
+illegal_lists(Config) when is_list(Config) ->
+ BitStrs = gen_variations(["gurka", <<1:1>>, "gaffel"]),
+ BadInts = gen_variations(["gurka", 890, "gaffel"]),
+ Atoms = gen_variations([gurka, "gaffel"]),
+ BadTails = [["test" | 0], ["gurka", gaffel]],
+
+ Variations =
+ BitStrs ++ BadInts ++ Atoms ++ BadTails,
+
+ illegality_test(fun erlang:iolist_to_iovec/1, Variations),
+
+ ok.
+
+improper_lists(Config) when is_list(Config) ->
+ Variations = [
+ [[[[1 | <<2>>] | <<3>>] | <<4>>] | <<5>>],
+ [[<<"test">>, 3] | <<"improper tail">>],
+ [1, 2, 3 | <<"improper tail">>]
+ ],
+ equivalence_test(fun erlang:iolist_to_iovec/1, Variations),
+ ok.
+
+cons_bomb(Config) when is_list(Config) ->
+ IntBase = gen_variations([I || I <- lists:seq(1, 255)]),
+ BinBase = gen_variations([<<I:8>> || I <- lists:seq(1, 255)]),
+ MixBase = gen_variations([<<12, 45, 78>>, lists:seq(1, 255)]),
+
+ Rounds =
+ case system_mem_size() of
+ Mem when Mem >= (16 bsl 30) -> 32;
+ Mem when Mem >= (3 bsl 30) -> 28;
+ _ -> 20
+ end,
+
+ Variations = gen_variations([IntBase, BinBase, MixBase], Rounds),
+ equivalence_test(fun erlang:iolist_to_iovec/1, Variations),
+ ok.
+
+iolist_to_iovec_idempotence(Config) when is_list(Config) ->
+ IntVariations = gen_variations([I || I <- lists:seq(1, 255)]),
+ BinVariations = gen_variations([<<I:8>> || I <- lists:seq(1, 255)]),
+ MixVariations = gen_variations([<<12, 45, 78>>, lists:seq(1, 255)]),
+
+ Variations = [IntVariations, BinVariations, MixVariations],
+ Optimized = erlang:iolist_to_iovec(Variations),
+
+ true = Optimized =:= erlang:iolist_to_iovec(Optimized),
+ ok.
+
+iolist_to_iovec_correctness(Config) when is_list(Config) ->
+ IntVariations = gen_variations([I || I <- lists:seq(1, 255)]),
+ BinVariations = gen_variations([<<I:8>> || I <- lists:seq(1, 255)]),
+ MixVariations = gen_variations([<<12, 45, 78>>, lists:seq(1, 255)]),
+
+ Variations = [IntVariations, BinVariations, MixVariations],
+ Optimized = erlang:iolist_to_iovec(Variations),
+
+ true = is_iolist_equal(Optimized, Variations),
+ ok.
+
+illegality_test(Fun, Variations) ->
+ [{'EXIT',{badarg, _}} = (catch Fun(Variation)) || Variation <- Variations].
+
+equivalence_test(Fun, [Head | _] = Variations) ->
+ Comparand = Fun(Head),
+ [is_iolist_equal(Comparand, Fun(Variation)) || Variation <- Variations],
+ ok.
+
+is_iolist_equal(A, B) ->
+ iolist_to_binary(A) =:= iolist_to_binary(B).
+
+%% Generates a bunch of lists whose contents will be equal to Base repeated a
+%% few times. The lists only differ by their structure, so their reduction to
+%% a simpler format should yield the same result.
+gen_variations(Base) ->
+ gen_variations(Base, 16).
+gen_variations(Base, N) ->
+ [gen_flat_list(Base, N),
+ gen_nested_list(Base, N),
+ gen_nasty_list(Base, N)].
+
+gen_flat_list(Base, N) ->
+ lists:flatten(gen_nested_list(Base, N)).
+
+gen_nested_list(Base, N) ->
+ [Base || _ <- lists:seq(1, N)].
+
+gen_nasty_list(Base, N) ->
+ gen_nasty_list_1(gen_nested_list(Base, N), []).
+gen_nasty_list_1([], Result) ->
+ Result;
+gen_nasty_list_1([Head | Base], Result) when is_list(Head) ->
+ gen_nasty_list_1(Base, [[Result], [gen_nasty_list_1(Head, [])]]);
+gen_nasty_list_1([Head | Base], Result) ->
+ gen_nasty_list_1(Base, [[Result], [Head]]).
+
+system_mem_size() ->
+ application:ensure_all_started(os_mon),
+ {Tot,_Used,_} = memsup:get_memory_data(),
+ Tot.
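
The property the new suite leans on is that erlang:iolist_to_iovec/1 may restructure its input arbitrarily (merging small chunks, dropping empty binaries, accepting binary-terminated improper lists) as long as the flattened bytes are unchanged, and that the result is always a list of binaries. A minimal standalone check of that property, mirroring is_iolist_equal/2 above (illustrative data):

    IoList = [<<"abc">>, [1, 2, 3 | <<"improper tail">>], <<>>],
    IoVec  = erlang:iolist_to_iovec(IoList),
    true   = lists:all(fun is_binary/1, IoVec),
    true   = iolist_to_binary(IoVec) =:= iolist_to_binary(IoList).
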
diff --git a/erts/emulator/test/lcnt_SUITE.erl b/erts/emulator/test/lcnt_SUITE.erl
new file mode 100644
index 0000000000..504b9b54cf
--- /dev/null
+++ b/erts/emulator/test/lcnt_SUITE.erl
@@ -0,0 +1,156 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(lcnt_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export(
+ [all/0, suite/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_testcase/2, end_per_testcase/2]).
+
+-export(
+ [toggle_lock_counting/1, error_on_invalid_category/1, preserve_locks/1]).
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap, {seconds, 10}}].
+
+all() ->
+ [toggle_lock_counting, error_on_invalid_category, preserve_locks].
+
+init_per_suite(Config) ->
+ case erlang:system_info(lock_counting) of
+ true ->
+ %% The tests will run straight over these properties, so we have to
+ %% preserve them to avoid tainting the other tests.
+ OldCopySave = erts_debug:lcnt_control(copy_save),
+ OldMask = erts_debug:lcnt_control(mask),
+ [{lcnt_SUITE, {OldCopySave, OldMask}} | Config];
+ _ ->
+ {skip, "Lock counting is not enabled"}
+ end.
+
+end_per_suite(Config) ->
+ {OldCopySave, OldMask} = proplists:get_value(lcnt_SUITE, Config),
+
+ erts_debug:lcnt_control(copy_save, OldCopySave),
+ OldCopySave = erts_debug:lcnt_control(copy_save),
+
+ erts_debug:lcnt_control(mask, OldMask),
+ OldMask = erts_debug:lcnt_control(mask),
+
+ erts_debug:lcnt_clear(),
+ ok.
+
+init_per_testcase(_Case, Config) ->
+ disable_lock_counting(),
+ Config.
+
+end_per_testcase(_Case, _Config) ->
+ ok.
+
+disable_lock_counting() ->
+ ok = erts_debug:lcnt_control(copy_save, false),
+ ok = erts_debug:lcnt_control(mask, []),
+ ok = erts_debug:lcnt_clear(),
+
+ %% Sanity check.
+ false = erts_debug:lcnt_control(copy_save),
+ [] = erts_debug:lcnt_control(mask),
+
+ %% The above commands rely on some lazy operations, so we'll have to wait
+ %% for the list to clear.
+ ok = wait_for_empty_lock_list().
+
+wait_for_empty_lock_list() ->
+ wait_for_empty_lock_list(10).
+wait_for_empty_lock_list(Tries) when Tries > 0 ->
+ try_flush_cleanup_ops(),
+ case erts_debug:lcnt_collect() of
+ [{duration, _}, {locks, []}] ->
+ ok;
+ _ ->
+ timer:sleep(50),
+ wait_for_empty_lock_list(Tries - 1)
+ end;
+wait_for_empty_lock_list(0) ->
+ ct:fail("Lock list failed to clear after disabling lock counting.").
+
+%% Queue up a lot of thread progress cleanup ops in a vain attempt to
+%% flush the lock list.
+try_flush_cleanup_ops() ->
+ false = lists:member(process, erts_debug:lcnt_control(mask)),
+ [spawn(fun() -> ok end) || _ <- lists:seq(1, 1000)].
+
+%%
+%% Test cases
+%%
+
+toggle_lock_counting(Config) when is_list(Config) ->
+ Categories =
+ [allocator, db, debug, distribution, generic, io, process, scheduler],
+ lists:foreach(
+ fun(Category) ->
+ Locks = get_lock_info_for(Category),
+ if
+ Locks =/= [] ->
+ disable_lock_counting();
+ Locks =:= [] ->
+ ct:fail("Failed to toggle ~p locks.", [Category])
+ end
+ end, Categories).
+
+get_lock_info_for(Categories) when is_list(Categories) ->
+ ok = erts_debug:lcnt_control(mask, Categories),
+ [{duration, _}, {locks, Locks}] = erts_debug:lcnt_collect(),
+ Locks;
+
+get_lock_info_for(Category) when is_atom(Category) ->
+ get_lock_info_for([Category]).
+
+preserve_locks(Config) when is_list(Config) ->
+ erts_debug:lcnt_control(mask, [process]),
+
+ erts_debug:lcnt_control(copy_save, true),
+ [spawn(fun() -> ok end) || _ <- lists:seq(1, 1000)],
+
+ %% Wait for the processes to be fully destroyed before disabling copy_save,
+ %% then remove all active locks from the list. (There's no foolproof method
+ %% to do this; sleeping before/after is the best way we have)
+ timer:sleep(500),
+
+ erts_debug:lcnt_control(copy_save, false),
+ erts_debug:lcnt_control(mask, []),
+
+ try_flush_cleanup_ops(),
+ timer:sleep(500),
+
+ case erts_debug:lcnt_collect() of
+ [{duration, _}, {locks, Locks}] when length(Locks) > 0 ->
+ ct:pal("Preserved ~p locks.", [length(Locks)]);
+ [{duration, _}, {locks, []}] ->
+ ct:fail("copy_save didn't preserve any locks.")
+ end.
+
+error_on_invalid_category(Config) when is_list(Config) ->
+ {error, badarg, q_invalid} = erts_debug:lcnt_control(mask, [q_invalid]),
+ ok.
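
Taken together, the suite documents a small measurement workflow: select lock categories with erts_debug:lcnt_control(mask, ...), optionally keep statistics for destroyed locks with copy_save, read results with lcnt_collect/0 and reset with lcnt_clear/0. A minimal sketch of such a session, built only from calls that appear in the suite (the io/process categories are an arbitrary choice):

    ok = erts_debug:lcnt_clear(),
    ok = erts_debug:lcnt_control(copy_save, true),
    ok = erts_debug:lcnt_control(mask, [io, process]),
    %% ... run the workload to be profiled ...
    [{duration, _Duration}, {locks, Locks}] = erts_debug:lcnt_collect(),
    ok = erts_debug:lcnt_control(mask, []),
    ok = erts_debug:lcnt_control(copy_save, false).
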
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 05c250125d..4811244b98 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -61,7 +61,8 @@
nif_internal_hash_salted/1,
nif_phash2/1,
nif_whereis/1, nif_whereis_parallel/1,
- nif_whereis_threaded/1, nif_whereis_proxy/1
+ nif_whereis_threaded/1, nif_whereis_proxy/1,
+ nif_ioq/1
]).
-export([many_args_100/100]).
@@ -99,7 +100,8 @@ all() ->
nif_internal_hash,
nif_internal_hash_salted,
nif_phash2,
- nif_whereis, nif_whereis_parallel, nif_whereis_threaded].
+ nif_whereis, nif_whereis_parallel, nif_whereis_threaded,
+ nif_ioq].
groups() ->
[{G, [], api_repeaters()} || G <- api_groups()]
@@ -2886,11 +2888,15 @@ nif_whereis_parallel(Config) when is_list(Config) ->
true = lists:all(PidReg, Procs),
%% tell them all to 'fire' as fast as we can
- [P ! {Ref, send_proc} || {_, P, _} <- Procs],
+ repeat(10, fun(_) ->
+ [P ! {Ref, send_proc} || {_, P, _} <- Procs]
+ end, void),
%% each gets forwarded through two processes
- true = lists:all(RecvNum, NSeq),
- true = lists:all(RecvNum, NSeq),
+ repeat(10, fun(_) ->
+ true = lists:all(RecvNum, NSeq),
+ true = lists:all(RecvNum, NSeq)
+ end, void),
%% tell them all to 'quit' by name
[N ! {Ref, quit} || {N, _, _} <- Procs],
@@ -2953,6 +2959,180 @@ nif_whereis_proxy(Ref) ->
{Ref, quit} ->
ok
end.
+nif_ioq(Config) ->
+ ensure_lib_loaded(Config),
+
+ Script =
+ [{create, a},
+
+ %% Test enq of erlang term binary
+ {enqb, a},
+ {enqb, a, 3},
+
+ %% Test enq of non-erlang term binary
+ {enqbraw,a},
+ {enqbraw,a, 5},
+ {peek, a},
+ {deq, a, 42},
+
+ %% Test enqv
+ {enqv, a, 2, 100},
+ {deq, a, all},
+
+ %% This skips all elements but one in the iolist
+ {enqv, a, 5, iolist_size(nif_ioq_payload(5)) - 1},
+ {peek, a},
+
+ %% Test to enqueue a bunch of refc binaries
+ {enqv, a, [nif_ioq_payload(refcbin) || _ <- lists:seq(1,20)], 0},
+
+ %% Enq stuff to destroy with data in queue
+ {enqv, a, 2, 100},
+ {destroy,a},
+
+ %% Test destroy of new queue
+ {create, a},
+ {destroy,a}
+ ],
+
+ nif_ioq_run(Script),
+
+    %% Test that enif_inspect_iovec works on its own (without a queue)
+ Payload = nif_ioq_payload(5),
+ PayloadBin = iolist_to_binary(Payload),
+
+ [begin
+ PayloadBin = iolist_to_binary(ioq_nif(inspect,Payload,Stack,Env)),
+ <<>> = iolist_to_binary(ioq_nif(inspect,[],Stack,Env))
+ end || Stack <- [no_stack, use_stack], Env <- [use_env, no_env]],
+
+ %% Test error cases
+
+ Q = ioq_nif(create),
+
+ {'EXIT', {badarg, _}} = (catch ioq_nif(deq, Q, 1)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, 1, 1234)),
+
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, [atom_in_list], 0)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, [make_ref()], 0)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, [256], 0)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, [-1], 0)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, [#{}], 0)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, [1 bsl 64], 0)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(enqv, Q, [{tuple}], 0)),
+
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, [atom_in_list], use_stack)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, [make_ref()], no_stack)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, [256], use_stack)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, [-1], no_stack)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, [#{}], use_stack)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, [1 bsl 64], no_stack)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, [{tuple}], use_stack)),
+ {'EXIT', {badarg, _}} = (catch ioq_nif(inspect, <<"binary">>, use_stack)),
+
+ ioq_nif(destroy, Q),
+
+ %% Test that the example in the docs works
+ ExampleQ = ioq_nif(create),
+ true = ioq_nif(example, ExampleQ, nif_ioq_payload(5)),
+ ioq_nif(destroy, ExampleQ),
+
+ ok.
+
+
+nif_ioq_run(Script) ->
+ nif_ioq_run(Script, #{}).
+
+nif_ioq_run([{Action, Name}|T], State)
+ when Action =:= enqb; Action =:= enqbraw ->
+ nif_ioq_run([{Action, Name, heapbin}|T], State);
+nif_ioq_run([{Action, Name, Skip}|T], State)
+ when Action =:= enqb, is_integer(Skip);
+ Action =:= enqbraw, is_integer(Skip) ->
+ nif_ioq_run([{Action, Name, heapbin, Skip}|T], State);
+nif_ioq_run([{Action, Name, N}|T], State)
+ when Action =:= enqv; Action =:= enqb; Action =:= enqbraw ->
+ nif_ioq_run([{Action, Name, N, 0}|T], State);
+nif_ioq_run([{Action, Name, N, Skip}|T], State)
+ when Action =:= enqv; Action =:= enqb; Action =:= enqbraw ->
+
+ #{ q := IOQ, b := B } = Q = maps:get(Name, State),
+ true = ioq_nif(size, IOQ) == iolist_size(B),
+
+ %% Sanitize the log output a bit so that it doesn't become too large.
+ H = {Action, Name, try iolist_size(N) of Sz -> Sz catch _:_ -> N end, Skip},
+ ct:log("~p", [H]),
+
+ Data = nif_ioq_payload(N),
+ ioq_nif(Action, IOQ, Data, Skip),
+
+ <<_:Skip/binary, SkippedData/binary>> = iolist_to_binary(Data),
+
+ true = ioq_nif(size, IOQ) == (iolist_size([B|SkippedData])),
+
+ nif_ioq_run(T, State#{ Name := Q#{ b := [B|SkippedData]}});
+nif_ioq_run([{peek, Name} = H|T], State) ->
+ #{ q := IOQ, b := B } = maps:get(Name, State),
+ true = ioq_nif(size, IOQ) == iolist_size(B),
+
+ ct:log("~p", [H]),
+
+ Data = ioq_nif(peek, IOQ, ioq_nif(size, IOQ)),
+
+ true = iolist_to_binary(B) == iolist_to_binary(Data),
+ nif_ioq_run(T, State);
+nif_ioq_run([{deq, Name, all}|T], State) ->
+ #{ q := IOQ, b := B } = maps:get(Name, State),
+ Size = ioq_nif(size, IOQ),
+ true = Size == iolist_size(B),
+ nif_ioq_run([{deq, Name, Size}|T], State);
+nif_ioq_run([{deq, Name, N} = H|T], State) ->
+ #{ q := IOQ, b := B } = Q = maps:get(Name, State),
+ true = ioq_nif(size, IOQ) == iolist_size(B),
+
+ ct:log("~p", [H]),
+
+ <<_:N/binary,Remain/binary>> = iolist_to_binary(B),
+ NewQ = Q#{ b := Remain },
+
+ Sz = ioq_nif(deq, IOQ, N),
+
+ true = Sz == iolist_size(Remain),
+ true = ioq_nif(size, IOQ) == iolist_size(Remain),
+
+ nif_ioq_run(T, State#{ Name := NewQ });
+nif_ioq_run([{create, Name} = H|T], State) ->
+ ct:log("~p", [H]),
+ nif_ioq_run(T, State#{ Name => #{ q => ioq_nif(create), b => [] } });
+nif_ioq_run([{destroy, Name} = H|T], State) ->
+ #{ q := IOQ, b := B } = maps:get(Name, State),
+ true = ioq_nif(size, IOQ) == iolist_size(B),
+
+ ct:log("~p", [H]),
+
+ ioq_nif(destroy, IOQ),
+
+ nif_ioq_run(T, maps:remove(Name, State));
+nif_ioq_run([], State) ->
+ State.
+
+nif_ioq_payload(N) when is_integer(N) ->
+ Tail = if N > 3 -> nif_ioq_payload(N-3); true -> [] end,
+ Head = element(1, lists:split(N,[nif_ioq_payload(subbin),
+ nif_ioq_payload(heapbin),
+ nif_ioq_payload(refcbin) | Tail])),
+ erlang:iolist_to_iovec(Head);
+nif_ioq_payload(subbin) ->
+ Bin = nif_ioq_payload(refcbin),
+ Sz = size(Bin) - 1,
+ <<_:8,SubBin:Sz/binary,_/bits>> = Bin,
+ SubBin;
+nif_ioq_payload(heapbin) ->
+ <<"a literal heap binary">>;
+nif_ioq_payload(refcbin) ->
+ iolist_to_binary([lists:seq(1,255) || _ <- lists:seq(1,255)]);
+nif_ioq_payload(Else) ->
+ Else.
%% The NIFs:
lib_version() -> undefined.
@@ -3028,6 +3208,10 @@ monitor_process_nif(_,_,_,_) -> ?nif_stub.
demonitor_process_nif(_,_) -> ?nif_stub.
compare_monitors_nif(_,_) -> ?nif_stub.
monitor_frenzy_nif(_,_,_,_) -> ?nif_stub.
+ioq_nif(_) -> ?nif_stub.
+ioq_nif(_,_) -> ?nif_stub.
+ioq_nif(_,_,_) -> ?nif_stub.
+ioq_nif(_,_,_,_) -> ?nif_stub.
%% whereis
whereis_send(_Type,_Name,_Msg) -> ?nif_stub.
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 307d1c390f..b47d013bd2 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -186,6 +186,12 @@ static ErlNifResourceTypeInit frenzy_rt_init = {
static ErlNifResourceType* whereis_resource_type;
static void whereis_thread_resource_dtor(ErlNifEnv* env, void* obj);
+static ErlNifResourceType* ioq_resource_type;
+
+static void ioq_resource_dtor(ErlNifEnv* env, void* obj);
+struct ioq_resource {
+ ErlNifIOQueue *q;
+};
static int get_pointer(ErlNifEnv* env, ERL_NIF_TERM term, void** pp)
{
@@ -243,6 +249,10 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
whereis_resource_type = enif_open_resource_type(env, NULL, "nif_SUITE.whereis",
whereis_thread_resource_dtor, ERL_NIF_RT_CREATE, NULL);
+ ioq_resource_type = enif_open_resource_type(env,NULL,"ioq",
+ ioq_resource_dtor,
+ ERL_NIF_RT_CREATE, NULL);
+
atom_false = enif_make_atom(env,"false");
atom_true = enif_make_atom(env,"true");
atom_self = enif_make_atom(env,"self");
@@ -2430,7 +2440,6 @@ static ERL_NIF_TERM format_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM arg
return enif_make_binary(env,&obin);
}
-
static int get_fd(ErlNifEnv* env, ERL_NIF_TERM term, struct fd_resource** rsrc)
{
if (!enif_get_resource(env, term, fd_resource_type, (void**)rsrc)) {
@@ -3158,7 +3167,231 @@ static void frenzy_resource_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
abort();
}
+/*********** testing ioq ************/
+
+static void ioq_resource_dtor(ErlNifEnv* env, void* obj) {
+
+}
+
+#ifndef __WIN32__
+static int writeiovec(ErlNifEnv *env, ERL_NIF_TERM term, ERL_NIF_TERM *tail, ErlNifIOQueue *q, int fd) {
+ ErlNifIOVec vec, *iovec = &vec;
+ SysIOVec *sysiovec;
+ int saved_errno;
+ int iovcnt, n;
+
+ if (!enif_inspect_iovec(env, 64, term, tail, &iovec))
+ return -2;
+
+ if (enif_ioq_size(q) > 0) {
+ /* If the I/O queue contains data we enqueue the iovec and then
+ peek the data to write out of the queue. */
+ if (!enif_ioq_enqv(q, iovec, 0))
+ return -3;
+
+ sysiovec = enif_ioq_peek(q, &iovcnt);
+ } else {
+ /* If the I/O queue is empty we skip the trip through it. */
+ iovcnt = iovec->iovcnt;
+ sysiovec = iovec->iov;
+ }
+
+ /* Attempt to write the data */
+ n = writev(fd, sysiovec, iovcnt);
+ saved_errno = errno;
+
+ if (enif_ioq_size(q) == 0) {
+ /* If the I/O queue was initially empty we enqueue any
+ remaining data into the queue for writing later. */
+ if (n >= 0 && !enif_ioq_enqv(q, iovec, n))
+ return -3;
+ } else {
+ /* Dequeue any data that was written from the queue. */
+ if (n > 0 && !enif_ioq_deq(q, n, NULL))
+ return -4;
+ }
+
+ /* return n, which is either number of bytes written or -1 if
+ some error happened */
+ errno = saved_errno;
+ return n;
+}
+#endif
+
+static ERL_NIF_TERM ioq(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ struct ioq_resource *ioq;
+ ERL_NIF_TERM ret;
+ if (enif_is_identical(argv[0], enif_make_atom(env, "create"))) {
+ ErlNifIOQueue *q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
+ ioq = (struct ioq_resource *)enif_alloc_resource(ioq_resource_type,
+ sizeof(*ioq));
+ ioq->q = q;
+ ret = enif_make_resource(env, ioq);
+ enif_release_resource(ioq);
+ return ret;
+ } else if (enif_is_identical(argv[0], enif_make_atom(env, "inspect"))) {
+ ErlNifIOVec vec, *iovec = NULL;
+ int i, iovcnt;
+ ERL_NIF_TERM *elems, tail, list;
+ ErlNifEnv *myenv = NULL;
+
+ if (enif_is_identical(argv[2], enif_make_atom(env, "use_stack")))
+ iovec = &vec;
+ if (enif_is_identical(argv[3], enif_make_atom(env, "use_env")))
+ myenv = env;
+ if (!enif_inspect_iovec(myenv, ~(size_t)0, argv[1], &tail, &iovec))
+ return enif_make_badarg(env);
+
+ iovcnt = iovec->iovcnt;
+ elems = enif_alloc(sizeof(ERL_NIF_TERM) * iovcnt);
+
+ for (i = 0; i < iovcnt; i++) {
+ ErlNifBinary bin;
+ if (!enif_alloc_binary(iovec->iov[i].iov_len, &bin)) {
+ enif_free_iovec(iovec);
+ enif_free(elems);
+ return enif_make_badarg(env);
+ }
+ memcpy(bin.data, iovec->iov[i].iov_base, iovec->iov[i].iov_len);
+ elems[i] = enif_make_binary(env, &bin);
+ }
+
+ if (!myenv)
+ enif_free_iovec(iovec);
+
+ list = enif_make_list_from_array(env, elems, iovcnt);
+ enif_free(elems);
+ return list;
+ } else {
+ unsigned skip;
+ if (!enif_get_resource(env, argv[1], ioq_resource_type, (void**)&ioq)
+ || !ioq->q)
+ return enif_make_badarg(env);
+
+ if (enif_is_identical(argv[0], enif_make_atom(env, "example"))) {
+#ifndef __WIN32__
+ int fd[2], res = 0, cnt = 0, queue_cnt;
+ ERL_NIF_TERM tail;
+ char buff[255];
+ pipe(fd);
+ fcntl(fd[0], F_SETFL, fcntl(fd[0], F_GETFL) | O_NONBLOCK);
+ fcntl(fd[1], F_SETFL, fcntl(fd[1], F_GETFL) | O_NONBLOCK);
+
+ /* Write until the pipe buffer is full, which should result in data
+ * being queued up. */
+ for (res = 0; res >= 0; ) {
+ cnt += res;
+ res = writeiovec(env, argv[2], &tail, ioq->q, fd[1]);
+ }
+
+ /* Flush the queue while reading from the other end of the pipe. */
+ tail = enif_make_list(env, 0);
+ while (enif_ioq_size(ioq->q) > 0) {
+ res = writeiovec(env, tail, &tail, ioq->q, fd[1]);
+ if (res < 0 && errno != EAGAIN) {
+ break;
+ } else if (res > 0) {
+ cnt += res;
+ }
+
+ for (res = 0; res >= 0; ) {
+ cnt -= res;
+ res = read(fd[0], buff, sizeof(buff));
+ }
+ }
+
+ close(fd[0]);
+ close(fd[1]);
+
+ /* Check that we read as much as we wrote */
+ if (cnt == 0 && enif_ioq_size(ioq->q) == 0)
+ return enif_make_atom(env, "true");
+
+ return enif_make_int(env, cnt);
+#else
+ return enif_make_atom(env, "true");
+#endif
+ }
+ if (enif_is_identical(argv[0], enif_make_atom(env, "destroy"))) {
+ enif_ioq_destroy(ioq->q);
+ ioq->q = NULL;
+ return enif_make_atom(env, "false");
+ } else if (enif_is_identical(argv[0], enif_make_atom(env, "enqv"))) {
+ ErlNifIOVec vec, *iovec = &vec;
+ ERL_NIF_TERM tail;
+
+ if (!enif_get_uint(env, argv[3], &skip))
+ return enif_make_badarg(env);
+ if (!enif_inspect_iovec(env, ~0ul, argv[2], &tail, &iovec))
+ return enif_make_badarg(env);
+ if (!enif_ioq_enqv(ioq->q, iovec, skip))
+ return enif_make_badarg(env);
+
+ return enif_make_atom(env, "true");
+ } else if (enif_is_identical(argv[0], enif_make_atom(env, "enqb"))) {
+ ErlNifBinary bin;
+ if (!enif_get_uint(env, argv[3], &skip) ||
+ !enif_inspect_binary(env, argv[2], &bin))
+ return enif_make_badarg(env);
+
+ if (!enif_ioq_enq_binary(ioq->q, &bin, skip))
+ return enif_make_badarg(env);
+
+ return enif_make_atom(env, "true");
+ } else if (enif_is_identical(argv[0], enif_make_atom(env, "enqbraw"))) {
+ ErlNifBinary bin;
+ ErlNifBinary localbin;
+ int i;
+ if (!enif_get_uint(env, argv[3], &skip) ||
+ !enif_inspect_binary(env, argv[2], &bin) ||
+ !enif_alloc_binary(bin.size, &localbin))
+ return enif_make_badarg(env);
+
+ memcpy(localbin.data, bin.data, bin.size);
+ i = enif_ioq_enq_binary(ioq->q, &localbin, skip);
+ if (!i)
+ return enif_make_badarg(env);
+ else
+ return enif_make_atom(env, "true");
+ } else if (enif_is_identical(argv[0], enif_make_atom(env, "peek"))) {
+ int iovlen, num, i, off = 0;
+ SysIOVec *iov = enif_ioq_peek(ioq->q, &iovlen);
+ ErlNifBinary bin;
+
+ if (!enif_get_int(env, argv[2], &num) || !enif_alloc_binary(num, &bin))
+ return enif_make_badarg(env);
+
+ for (i = 0; i < iovlen && num > 0; i++) {
+ int to_copy = num < iov[i].iov_len ? num : iov[i].iov_len;
+ memcpy(bin.data + off, iov[i].iov_base, to_copy);
+ num -= to_copy;
+ off += to_copy;
+ }
+
+ return enif_make_binary(env, &bin);
+ } else if (enif_is_identical(argv[0], enif_make_atom(env, "deq"))) {
+ int num;
+ size_t sz;
+ ErlNifUInt64 sz64;
+ if (!enif_get_int(env, argv[2], &num))
+ return enif_make_badarg(env);
+
+ if (!enif_ioq_deq(ioq->q, num, &sz))
+ return enif_make_badarg(env);
+
+ sz64 = sz;
+
+ return enif_make_uint64(env, sz64);
+ } else if (enif_is_identical(argv[0], enif_make_atom(env, "size"))) {
+ ErlNifUInt64 size = enif_ioq_size(ioq->q);
+ return enif_make_uint64(env, size);
+ }
+ }
+
+ return enif_make_badarg(env);
+}
static ErlNifFunc nif_funcs[] =
{
@@ -3255,7 +3488,11 @@ static ErlNifFunc nif_funcs[] =
{"whereis_send", 3, whereis_send},
{"whereis_term", 2, whereis_term},
{"whereis_thd_lookup", 2, whereis_thd_lookup},
- {"whereis_thd_result", 1, whereis_thd_result}
+ {"whereis_thd_result", 1, whereis_thd_result},
+ {"ioq_nif", 1, ioq},
+ {"ioq_nif", 2, ioq},
+ {"ioq_nif", 3, ioq},
+ {"ioq_nif", 4, ioq}
};
ERL_NIF_INIT(nif_SUITE,nif_funcs,load,NULL,upgrade,unload)
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index f512fa3a57..730a17d7e8 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -86,6 +86,7 @@
cd_relative/1,
close_deaf_port/1,
count_fds/1,
+ dropped_commands/1,
dying_port/1,
env/1,
eof/1,
@@ -158,7 +159,7 @@ suite() ->
all() ->
[otp_6224, {group, stream}, basic_ping, slow_writes,
bad_packet, bad_port_messages, {group, options},
- {group, multiple_packets}, parallell, dying_port,
+ {group, multiple_packets}, parallell, dying_port, dropped_commands,
port_program_with_path, open_input_file_port,
open_output_file_port, name1, env, huge_env, bad_env, cd,
cd_relative, pipe_limit_env, bad_args,
@@ -548,6 +549,47 @@ make_dying_port(Config) when is_list(Config) ->
Command = lists:concat([PortTest, " -h0 -d -q"]),
open_port({spawn, Command}, [stream]).
+%% Test that dropped port_commands work correctly.
+%% This used to cause a segfault.
+%%
+%% This test case creates a port and then lets many processes
+%% send commands to it in parallel. After a while it closes the
+%% port, trying to catch the race of a command arriving
+%% while the port is closing.
+dropped_commands(Config) ->
+ %% Test with output callback
+ dropped_commands(Config, false, {self(), {command, "1"}}),
+ %% Test with outputv callback
+ dropped_commands(Config, true, {self(), {command, "1"}}).
+
+dropped_commands(Config, Outputv, Cmd) ->
+ Path = proplists:get_value(data_dir, Config),
+ os:putenv("ECHO_DRV_USE_OUTPUTV", atom_to_list(Outputv)),
+ ok = load_driver(Path, "echo_drv"),
+ [dropped_commands_test(Cmd) || _ <- lists:seq(1, 100)],
+ timer:sleep(100),
+ erl_ddll:unload_driver("echo_drv"),
+ os:unsetenv("ECHO_DRV_USE_OUTPUTV"),
+ ok.
+
+dropped_commands_test(Cmd) ->
+ spawn_monitor(
+ fun() ->
+ Port = erlang:open_port({spawn_driver, "echo_drv"},
+ [{parallelism, true}]),
+ [spawn_link(fun() -> spin(Port, Cmd) end) || _ <- lists:seq(1,8)],
+ timer:sleep(5),
+ port_close(Port),
+ timer:sleep(5),
+ exit(nok)
+ end),
+ receive _M -> timer:sleep(5) end.
+
+spin(P, Cmd) ->
+ P ! Cmd,
+ spin(P, Cmd).
+
+
%% Tests that port program with complete path (but without any
%% .exe extension) can be started, even if there is a file with
%% the same name but without the extension in the same directory.
diff --git a/erts/emulator/test/port_SUITE_data/echo_drv.c b/erts/emulator/test/port_SUITE_data/echo_drv.c
index 1d39c6a00c..b4370f6455 100644
--- a/erts/emulator/test/port_SUITE_data/echo_drv.c
+++ b/erts/emulator/test/port_SUITE_data/echo_drv.c
@@ -18,8 +18,11 @@ typedef struct _erl_drv_data EchoDrvData;
static EchoDrvData *echo_drv_start(ErlDrvPort port, char *command);
static void echo_drv_stop(EchoDrvData *data_p);
-static void echo_drv_output(ErlDrvData drv_data, char *buf,
- ErlDrvSizeT len);
+static void echo_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
+static ErlDrvSSizeT echo_control(ErlDrvData drv_data,
+ unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen);
+static void echo_outputv(ErlDrvData drv_data, ErlIOVec *ev);
static void echo_drv_finish(void);
static ErlDrvEntry echo_drv_entry = {
@@ -32,9 +35,9 @@ static ErlDrvEntry echo_drv_entry = {
"echo_drv",
echo_drv_finish,
NULL, /* handle */
- NULL, /* control */
+ echo_control, /* control */
NULL, /* timeout */
- NULL, /* outputv */
+ echo_outputv, /* outputv */
NULL, /* ready_async */
NULL,
NULL,
@@ -56,6 +59,14 @@ static ErlDrvEntry echo_drv_entry = {
DRIVER_INIT(echo_drv)
{
+ char buf[10];
+ size_t bufsz = sizeof(buf);
+ char *use_outputv;
+ use_outputv = (erl_drv_getenv("ECHO_DRV_USE_OUTPUTV", buf, &bufsz) == 0
+ ? buf
+ : "false");
+ if (strcmp(use_outputv, "true") != 0)
+ echo_drv_entry.outputv = NULL;
return &echo_drv_entry;
}
@@ -87,3 +98,15 @@ static void echo_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) {
static void echo_drv_finish() {
}
+
+static ErlDrvSSizeT echo_control(ErlDrvData drv_data,
+ unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)
+{
+ return 0;
+}
+
+static void echo_outputv(ErlDrvData drv_data, ErlIOVec *ev)
+{
+ return;
+}
diff --git a/erts/emulator/test/register_SUITE.erl b/erts/emulator/test/register_SUITE.erl
index 43ae749498..49da94a775 100644
--- a/erts/emulator/test/register_SUITE.erl
+++ b/erts/emulator/test/register_SUITE.erl
@@ -44,14 +44,7 @@ all() ->
-define(OTP_8099_NAME, otp_8099_reg_proc).
otp_8099(Config) when is_list(Config) ->
- case catch erlang:system_info(lock_counting) of
- true -> {skipped,
- "Lock counting enabled. Current lock counting "
- "implementation cannot handle this many "
- "processes."};
- _ ->
- otp_8099_test(1000000)
- end.
+ otp_8099_test(1000000).
otp_8099_test(0) ->
ok;
diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl
index 7690557fda..40cc940a94 100644
--- a/erts/emulator/test/statistics_SUITE.erl
+++ b/erts/emulator/test/statistics_SUITE.erl
@@ -23,8 +23,10 @@
%% Tests the statistics/1 bif.
-export([all/0, suite/0, groups/0,
+ wall_clock_sanity/1,
wall_clock_zero_diff/1, wall_clock_update/1,
- runtime_zero_diff/1,
+ runtime_sanity/1,
+ runtime_zero_diff/1,
runtime_update/1, runtime_diff/1,
run_queue_one/1,
scheduler_wall_time/1,
@@ -54,11 +56,23 @@ all() ->
groups() ->
[{wall_clock, [],
- [wall_clock_zero_diff, wall_clock_update]},
+ [wall_clock_sanity, wall_clock_zero_diff, wall_clock_update]},
{runtime, [],
- [runtime_zero_diff, runtime_update, runtime_diff]},
+ [runtime_sanity, runtime_zero_diff, runtime_update, runtime_diff]},
{run_queue, [], [run_queue_one]}].
+wall_clock_sanity(Config) when is_list(Config) ->
+ erlang:yield(),
+ {WallClock, _} = statistics(wall_clock),
+ MT = erlang:monotonic_time(),
+ Time = erlang:convert_time_unit(MT - erlang:system_info(start_time),
+ native, millisecond),
+ io:format("Time=~p WallClock=~p~n",
+ [Time, WallClock]),
+ true = WallClock =< Time,
+ true = Time - 100 =< WallClock,
+ ok.
+
%%% Testing statistics(wall_clock).
%% Tests that the 'Wall clock since last call' element of the result
@@ -102,6 +116,20 @@ wall_clock_update1(0) ->
%%% Test statistics(runtime).
+runtime_sanity(Config) when is_list(Config) ->
+ case erlang:system_info(logical_processors_available) of
+ unknown ->
+ {skipped, "Don't know available logical processors"};
+ LP when is_integer(LP) ->
+ erlang:yield(),
+ {RunTime, _} = statistics(runtime),
+ MT = erlang:monotonic_time(),
+ Time = erlang:convert_time_unit(MT - erlang:system_info(start_time),
+ native, millisecond),
+ io:format("Time=~p RunTime=~p~n",
+ [Time, RunTime]),
+ true = RunTime =< Time*LP
+ end.
%% Tests that the difference between the times returned from two consecutive
%% calls to statistics(runtime) is zero.
diff --git a/erts/etc/common/Makefile.in b/erts/etc/common/Makefile.in
index 583426460e..5b1b9119ce 100644
--- a/erts/etc/common/Makefile.in
+++ b/erts/etc/common/Makefile.in
@@ -57,7 +57,7 @@ ERTS_INCL = -I$(ERL_TOP)/erts/include \
CC = @CC@
WFLAGS = @WFLAGS@
CFLAGS = @CFLAGS@ @DEFS@ $(TYPE_FLAGS) @WFLAGS@ -I$(SYSDIR) -I$(EMUDIR) \
- $(ERTS_INCL) -DOTP_SYSTEM_VERSION=\"$(SYSTEM_VSN)\"
+ -I$(COMSYSDIR) $(ERTS_INCL) -DOTP_SYSTEM_VERSION=\"$(SYSTEM_VSN)\"
LD = @LD@
LIBS = @LIBS@
LDFLAGS = @LDFLAGS@
@@ -70,13 +70,16 @@ endif
ifeq ($(TARGET),win32)
ifeq ($(TYPE),debug)
CFLAGS = $(subst -O2,-g,@CFLAGS@ @DEFS@ $(TYPE_FLAGS) @WFLAGS@ -I$(SYSDIR) \
- -I$(EMUDIR) $(ERTS_INCL) -DOTP_SYSTEM_VERSION=\"$(SYSTEM_VSN)\")
+ -I$(EMUDIR) -I$(COMSYSDIR) $(ERTS_INCL) \
+ -DOTP_SYSTEM_VERSION=\"$(SYSTEM_VSN)\")
LDFLAGS += -g
endif
endif
+
BINDIR = $(ERL_TOP)/bin/$(TARGET)
OBJDIR = $(ERL_TOP)/erts/obj$(TYPEMARKER)/$(TARGET)
EMUDIR = $(ERL_TOP)/erts/emulator/beam
+COMSYSDIR = $(ERL_TOP)/erts/emulator/sys/common
EMUOSDIR = $(ERL_TOP)/erts/emulator/@ERLANG_OSTYPE@
SYSDIR = $(ERL_TOP)/erts/emulator/sys/@ERLANG_OSTYPE@
DRVDIR = $(ERL_TOP)/erts/emulator/drivers/@ERLANG_OSTYPE@
diff --git a/erts/etc/common/escript.c b/erts/etc/common/escript.c
index 7f0af77a4c..9cd5dd3fab 100644
--- a/erts/etc/common/escript.c
+++ b/erts/etc/common/escript.c
@@ -74,7 +74,6 @@ static void error(char* format, ...);
static void* emalloc(size_t size);
static void efree(void *p);
static char* strsave(char* string);
-static void push_words(char* src);
static int run_erlang(char* name, char** argv);
static char* get_default_emulator(char* progname);
#ifdef __WIN32__
@@ -583,26 +582,6 @@ main(int argc, char** argv)
return run_erlang(eargv[0], eargv);
}
-static void
-push_words(char* src)
-{
- char sbuf[PMAX];
- char* dst;
-
- dst = sbuf;
- while ((*dst++ = *src++) != '\0') {
- if (isspace((int)*src)) {
- *dst = '\0';
- PUSH(strsave(sbuf));
- dst = sbuf;
- do {
- src++;
- } while (isspace((int)*src));
- }
- }
- if (sbuf[0])
- PUSH(strsave(sbuf));
-}
#ifdef __WIN32__
wchar_t *make_commandline(char **argv)
{
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 58c17dc416..6fa48e8582 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 2acb1f1211..1c8d0e626a 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam
index a959ebaaf2..267b5cb0a8 100644
--- a/erts/preloaded/ebin/zlib.beam
+++ b/erts/preloaded/ebin/zlib.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 72dd804412..f796ea64d3 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -87,6 +87,10 @@
-export_type([prepared_code/0]).
+-type iovec() :: [binary()].
+
+-export_type([iovec/0]).
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Native code BIF stubs and their types
%% (BIF's actually implemented in this module goes last in the file)
@@ -124,7 +128,7 @@
has_prepared_code_on_load/1, hibernate/3]).
-export([insert_element/3]).
-export([integer_to_binary/1, integer_to_list/1]).
--export([iolist_size/1, iolist_to_binary/1]).
+-export([iolist_size/1, iolist_to_binary/1, iolist_to_iovec/1]).
-export([is_alive/0, is_builtin/3, is_process_alive/1, length/1, link/1]).
-export([list_to_atom/1, list_to_binary/1]).
-export([list_to_bitstring/1, list_to_existing_atom/1, list_to_float/1]).
@@ -1079,6 +1083,12 @@ iolist_size(_Item) ->
iolist_to_binary(_IoListOrBinary) ->
erlang:nif_error(undefined).
+%% iolist_to_iovec/1
+-spec erlang:iolist_to_iovec(IoListOrBinary) -> iovec() when
+ IoListOrBinary :: iolist() | binary().
+iolist_to_iovec(_IoListOrBinary) ->
+ erlang:nif_error(undefined).
+
%% is_alive/0
-spec is_alive() -> boolean().
is_alive() ->
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index 1ccf8d599f..34a9f6b8b9 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -200,6 +200,8 @@ boot(BootArgs) ->
register(init, self()),
process_flag(trap_exit, true),
+ %% Load the zlib nif
+ zlib:on_load(),
%% Load the tracer nif
erl_tracer:on_load(),
diff --git a/erts/preloaded/src/zlib.erl b/erts/preloaded/src/zlib.erl
index 8cd3e39fd7..dca5a42779 100644
--- a/erts/preloaded/src/zlib.erl
+++ b/erts/preloaded/src/zlib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,19 +21,29 @@
-module(zlib).
-export([open/0,close/1,deflateInit/1,deflateInit/2,deflateInit/6,
- deflateSetDictionary/2,deflateReset/1,deflateParams/3,
- deflate/2,deflate/3,deflateEnd/1,
- inflateInit/1,inflateInit/2,
- inflateSetDictionary/2,inflateGetDictionary/1,
- inflateSync/1,inflateReset/1,inflate/2,inflateEnd/1,
- inflateChunk/1, inflateChunk/2,
- setBufSize/2,getBufSize/1,
- crc32/1,crc32/2,crc32/3,adler32/2,adler32/3,getQSize/1,
- crc32_combine/4,adler32_combine/4,
- compress/1,uncompress/1,zip/1,unzip/1,
- gzip/1,gunzip/1]).
-
--export_type([zstream/0, zlevel/0, zwindowbits/0, zmemlevel/0, zstrategy/0]).
+ deflateSetDictionary/2,deflateReset/1,deflateParams/3,
+ deflate/2,deflate/3,deflateEnd/1,
+ inflateInit/1,inflateInit/2,
+ inflateSetDictionary/2,inflateGetDictionary/1, inflateReset/1,
+ inflate/2,inflate/3,inflateEnd/1,
+ inflateChunk/2,inflateChunk/1,
+ safeInflate/2,
+ setBufSize/2,getBufSize/1,
+ crc32/1,crc32/2,crc32/3,adler32/2,adler32/3,
+ crc32_combine/4,adler32_combine/4,
+ compress/1,uncompress/1,zip/1,unzip/1,
+ gzip/1,gunzip/1]).
+
+-export([on_load/0]).
+
+%% These are soft-deprecated until OTP 21.
+% -deprecated([inflateChunk/1, inflateChunk/2,
+% getBufSize/1, setBufSize/2,
+% crc32/1,crc32/2,crc32/3,adler32/2,adler32/3,
+% crc32_combine/4,adler32_combine/4]).
+
+-export_type([zstream/0, zflush/0, zlevel/0, zwindowbits/0, zmemlevel/0,
+ zstrategy/0]).
%% flush argument encoding
-define(Z_NO_FLUSH, 0).
@@ -56,116 +66,76 @@
%% deflate compression method
-define(Z_DEFLATED, 8).
--define(Z_NULL, 0).
-
-define(MAX_WBITS, 15).
-%% gzip defs (rfc 1952)
-
--define(ID1, 16#1f).
--define(ID2, 16#8b).
-
--define(FTEXT, 16#01).
--define(FHCRC, 16#02).
--define(FEXTRA, 16#04).
--define(FNAME, 16#08).
--define(FCOMMENT, 16#10).
--define(RESERVED, 16#E0).
-
--define(OS_MDDOS, 0).
--define(OS_AMIGA, 1).
--define(OS_OPENVMS, 2).
--define(OS_UNIX, 3).
--define(OS_VMCMS, 4).
--define(OS_ATARI, 5).
--define(OS_OS2, 6).
--define(OS_MAC, 7).
--define(OS_ZSYS, 8).
--define(OS_CPM, 9).
--define(OS_TOP20, 10).
--define(OS_NTFS, 11).
--define(OS_QDOS, 12).
--define(OS_ACORN, 13).
--define(OS_UNKNOWN,255).
-
--define(DEFLATE_INIT, 1).
--define(DEFLATE_INIT2, 2).
--define(DEFLATE_SETDICT, 3).
--define(DEFLATE_RESET, 4).
--define(DEFLATE_END, 5).
--define(DEFLATE_PARAMS, 6).
--define(DEFLATE, 7).
-
--define(INFLATE_INIT, 8).
--define(INFLATE_INIT2, 9).
--define(INFLATE_SETDICT, 10).
--define(INFLATE_GETDICT, 11).
--define(INFLATE_SYNC, 12).
--define(INFLATE_RESET, 13).
--define(INFLATE_END, 14).
--define(INFLATE, 15).
--define(INFLATE_CHUNK, 26).
-
--define(CRC32_0, 16).
--define(CRC32_1, 17).
--define(CRC32_2, 18).
-
--define(SET_BUFSZ, 19).
--define(GET_BUFSZ, 20).
--define(GET_QSIZE, 21).
-
--define(ADLER32_1, 22).
--define(ADLER32_2, 23).
-
--define(CRC32_COMBINE, 24).
--define(ADLER32_COMBINE, 25).
+%% Chunk sizes are hardcoded because they affect the predictability of the
+%% system. zlib is incapable of trapping, so we need to ensure that it never
+%% operates on any significant amount of data at a time.
+-define(DEFLATE_IN_CHUNKSIZE, 8 bsl 10).
+-define(DEFLATE_OUT_CHUNKSIZE, 8 bsl 10).
+-define(INFLATE_IN_CHUNKSIZE, 8 bsl 10).
+-define(INFLATE_OUT_CHUNKSIZE, 16 bsl 10).
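For reference, these shifted constants work out to 8 KiB for the deflate chunks and the inflate input chunk, and 16 KiB for the inflate output chunk; checking the arithmetic in an Erlang shell:

    1> 8 bsl 10.
    8192
    2> 16 bsl 10.
    16384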
%%------------------------------------------------------------------------
-%% Main data types of the file
--type zstream() :: port().
+%% Public data types.
+-type zstream() :: term().
+-type zflush() :: 'none' | 'sync' | 'full' | 'finish'.
-%% Auxiliary data types of the file
--type zlevel() :: 'none' | 'default' | 'best_compression' | 'best_speed'
- | 0..9.
--type zmethod() :: 'deflated'.
+-type zlevel() ::
+ 'none' | 'default' | 'best_compression' | 'best_speed' | 0..9.
+-type zstrategy() :: 'default' | 'filtered' | 'huffman_only' | 'rle'.
+
+-type zmemlevel() :: 1..9.
-type zwindowbits() :: -15..-8 | 8..47.
--type zmemlevel() :: 1..9.
--type zstrategy() :: 'default' | 'filtered' | 'huffman_only' | 'rle'.
+
+%% Private data types.
+
+-type zmethod() :: 'deflated'.
+
+-record(zlib_opts, {
+ stream :: zstream(),
+ method :: term(),
+ input_chunk_size :: integer(),
+ output_chunk_size :: integer(),
+ flush :: integer()
+ }).
%%------------------------------------------------------------------------
-%% open a z_stream
+on_load() ->
+ case erlang:load_nif(atom_to_list(?MODULE), 0) of
+ ok -> ok
+ end.
+
-spec open() -> zstream().
open() ->
- open_port({spawn, "zlib_drv"}, [binary]).
+ open_nif().
+open_nif() ->
+ erlang:nif_error(undef).
-%% close and release z_stream
-spec close(Z) -> 'ok' when
Z :: zstream().
close(Z) ->
- try
- true = port_close(Z),
- receive %In case the caller is the owner and traps exits
- {'EXIT',Z,_} -> ok
- after 0 -> ok
- end
- catch _:_ -> erlang:error(badarg)
- end.
+ close_nif(Z).
+close_nif(_Z) ->
+ erlang:nif_error(undef).
-spec deflateInit(Z) -> 'ok' when
Z :: zstream().
deflateInit(Z) ->
- call(Z, ?DEFLATE_INIT, <<?Z_DEFAULT_COMPRESSION:32>>).
+ deflateInit(Z, default).
-spec deflateInit(Z, Level) -> 'ok' when
Z :: zstream(),
Level :: zlevel().
deflateInit(Z, Level) ->
- call(Z, ?DEFLATE_INIT, <<(arg_level(Level)):32>>).
+ deflateInit_nif(Z, arg_level(Level)).
+
+deflateInit_nif(_Z, _Level) ->
+ erlang:nif_error(undef).
--spec deflateInit(Z, Level, Method,
- WindowBits, MemLevel, Strategy) -> 'ok' when
+-spec deflateInit(Z, Level, Method, WindowBits, MemLevel, Strategy) -> 'ok' when
Z :: zstream(),
Level :: zlevel(),
Method :: zmethod(),
@@ -173,31 +143,49 @@ deflateInit(Z, Level) ->
MemLevel :: zmemlevel(),
Strategy :: zstrategy().
deflateInit(Z, Level, Method, WindowBits, MemLevel, Strategy) ->
- call(Z, ?DEFLATE_INIT2, <<(arg_level(Level)):32,
- (arg_method(Method)):32,
- (arg_bitsz(WindowBits)):32,
- (arg_mem(MemLevel)):32,
- (arg_strategy(Strategy)):32>>).
+ deflateInit_nif(Z,
+ arg_level(Level),
+ arg_method(Method),
+ arg_bitsz(WindowBits),
+ arg_mem(MemLevel),
+ arg_strategy(Strategy)).
+deflateInit_nif(_Z, _Level, _Method, _WindowBits, _MemLevel, _Strategy) ->
+ erlang:nif_error(undef).
-spec deflateSetDictionary(Z, Dictionary) -> Adler32 when
Z :: zstream(),
Dictionary :: iodata(),
Adler32 :: integer().
deflateSetDictionary(Z, Dictionary) ->
- call(Z, ?DEFLATE_SETDICT, Dictionary).
+ deflateSetDictionary_nif(Z, Dictionary).
+deflateSetDictionary_nif(_Z, _Dictionary) ->
+ erlang:nif_error(undef).
-spec deflateReset(Z) -> 'ok' when
Z :: zstream().
deflateReset(Z) ->
- call(Z, ?DEFLATE_RESET, []).
+ deflateReset_nif(Z).
+deflateReset_nif(_Z) ->
+ erlang:nif_error(undef).
-spec deflateParams(Z, Level, Strategy) -> ok when
Z :: zstream(),
Level :: zlevel(),
Strategy :: zstrategy().
-deflateParams(Z, Level, Strategy) ->
- call(Z, ?DEFLATE_PARAMS, <<(arg_level(Level)):32,
- (arg_strategy(Strategy)):32>>).
+deflateParams(Z, Level0, Strategy0) ->
+ Level = arg_level(Level0),
+ Strategy = arg_strategy(Strategy0),
+ case deflateParams_nif(Z, Level, Strategy) of
+ buf_error ->
+ %% We had data left in the pipe; flush everything and stash it away
+ %% for the next deflate call before trying again.
+ Output = deflate(Z, <<>>, full),
+ save_progress(Z, deflate, Output),
+ deflateParams_nif(Z, Level, Strategy);
+ Any -> Any
+ end.
+deflateParams_nif(_Z, _Level, _Strategy) ->
+ erlang:nif_error(undef).
-spec deflate(Z, Data) -> Compressed when
Z :: zstream(),
@@ -209,170 +197,234 @@ deflate(Z, Data) ->
-spec deflate(Z, Data, Flush) -> Compressed when
Z :: zstream(),
Data :: iodata(),
- Flush :: none | sync | full | finish,
+ Flush :: zflush(),
Compressed :: iolist().
deflate(Z, Data, Flush) ->
- try port_command(Z, Data) of
- true ->
- _ = call(Z, ?DEFLATE, <<(arg_flush(Flush)):32>>),
- collect(Z)
- catch
- error:_Err ->
- flush(Z),
- erlang:error(badarg)
- end.
+ Progress = restore_progress(Z, deflate),
+ enqueue_input(Z, Data),
+ append_iolist(Progress, dequeue_all_chunks(Z, deflate_opts(Flush))).
+
+deflate_opts(Flush) ->
+ #zlib_opts{
+ method = fun deflate_nif/4,
+ input_chunk_size = ?DEFLATE_IN_CHUNKSIZE,
+ output_chunk_size = ?DEFLATE_OUT_CHUNKSIZE,
+ flush = arg_flush(Flush)
+ }.
+
+deflate_nif(_Z, _InputChSize, _OutputChSize, _Flush) ->
+ erlang:nif_error(undef).
-spec deflateEnd(Z) -> 'ok' when
Z :: zstream().
deflateEnd(Z) ->
- call(Z, ?DEFLATE_END, []).
+ deflateEnd_nif(Z).
+deflateEnd_nif(_Z) ->
+ erlang:nif_error(undef).
-spec inflateInit(Z) -> 'ok' when
Z :: zstream().
inflateInit(Z) ->
- call(Z, ?INFLATE_INIT, []).
+ inflateInit_nif(Z).
+inflateInit_nif(_Z) ->
+ erlang:nif_error(undef).
-spec inflateInit(Z, WindowBits) -> 'ok' when
Z :: zstream(),
WindowBits :: zwindowbits().
-inflateInit(Z, WindowBits) ->
- call(Z, ?INFLATE_INIT2, <<(arg_bitsz(WindowBits)):32>>).
+inflateInit(Z, WindowBits) ->
+ inflateInit_nif(Z, WindowBits).
+inflateInit_nif(_Z, _WindowBits) ->
+ erlang:nif_error(undef).
-spec inflateSetDictionary(Z, Dictionary) -> 'ok' when
Z :: zstream(),
Dictionary :: iodata().
-inflateSetDictionary(Z, Dictionary) ->
- call(Z, ?INFLATE_SETDICT, Dictionary).
+inflateSetDictionary(Z, Dictionary) ->
+ inflateSetDictionary_nif(Z, Dictionary).
+inflateSetDictionary_nif(_Z, _Dictionary) ->
+ erlang:nif_error(undef).
-spec inflateGetDictionary(Z) -> Dictionary when
Z :: zstream(),
- Dictionary :: iolist().
+ Dictionary :: binary().
inflateGetDictionary(Z) ->
- _ = call(Z, ?INFLATE_GETDICT, []),
- collect(Z).
-
--spec inflateSync(zstream()) -> 'ok'.
-inflateSync(Z) ->
- call(Z, ?INFLATE_SYNC, []).
+ case inflateGetDictionary_nif(Z) of
+ Dictionary when is_binary(Dictionary) ->
+ Dictionary;
+ not_supported ->
+ erlang:error(enotsup)
+ end.
+inflateGetDictionary_nif(_Z) ->
+ erlang:nif_error(undef).
-spec inflateReset(Z) -> 'ok' when
Z :: zstream().
-inflateReset(Z) ->
- call(Z, ?INFLATE_RESET, []).
+inflateReset(Z) ->
+ inflateReset_nif(Z).
+inflateReset_nif(_Z) ->
+ erlang:nif_error(undef).
-spec inflate(Z, Data) -> Decompressed when
Z :: zstream(),
Data :: iodata(),
Decompressed :: iolist().
inflate(Z, Data) ->
- try port_command(Z, Data) of
- true ->
- _ = call(Z, ?INFLATE, <<?Z_NO_FLUSH:32>>),
- collect(Z)
- catch
- error:_Err ->
- flush(Z),
- erlang:error(badarg)
+ inflate(Z, Data, []).
+
+-spec inflate(Z, Data, Options) -> Decompressed when
+ Z :: zstream(),
+ Data :: iodata(),
+ Options :: list({exception_on_need_dict, boolean()}),
+ Decompressed :: iolist() |
+ {need_dictionary,
+ Adler32 :: integer(),
+ Output :: iolist()}.
+inflate(Z, Data, Options) ->
+ enqueue_input(Z, Data),
+ Result = dequeue_all_chunks(Z, inflate_opts()),
+ case proplist_get_value(Options, exception_on_need_dict, true) of
+ true -> exception_on_need_dict(Z, Result);
+ false -> Result
end.
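A minimal usage sketch (hypothetical caller; Z, Compressed, and Dictionary are assumed to be bound elsewhere): with {exception_on_need_dict, false}, a required preset dictionary is reported in the return value rather than raised as an error, so the caller can install it and continue from the queued input:

    case zlib:inflate(Z, Compressed, [{exception_on_need_dict, false}]) of
        {need_dictionary, _Adler32, Decompressed0} ->
            ok = zlib:inflateSetDictionary(Z, Dictionary),
            [Decompressed0 | zlib:inflate(Z, [])];
        Decompressed ->
            Decompressed
    end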
+inflate_nif(_Z, _InputChSize, _OutputChSize, _Flush) ->
+ erlang:nif_error(undef).
+
+inflate_opts() ->
+ #zlib_opts{
+ method = fun inflate_nif/4,
+ input_chunk_size = ?INFLATE_IN_CHUNKSIZE,
+ output_chunk_size = ?INFLATE_OUT_CHUNKSIZE,
+ flush = arg_flush(none)
+ }.
+
-spec inflateChunk(Z, Data) -> Decompressed | {more, Decompressed} when
Z :: zstream(),
Data :: iodata(),
Decompressed :: iolist().
inflateChunk(Z, Data) ->
- try port_command(Z, Data) of
- true ->
- inflateChunk(Z)
- catch
- error:_Err ->
- flush(Z),
- erlang:error(badarg)
- end.
+ enqueue_input(Z, Data),
+ inflateChunk(Z).
-spec inflateChunk(Z) -> Decompressed | {more, Decompressed} when
Z :: zstream(),
Decompressed :: iolist().
inflateChunk(Z) ->
- Status = call(Z, ?INFLATE_CHUNK, []),
- Data = receive
- {Z, {data, Bin}} ->
- Bin
- after 0 ->
- []
- end,
-
- case Status of
- Good when (Good == ok) orelse (Good == stream_end) ->
- Data;
- inflate_has_more ->
- {more, Data}
- end.
+ Opts0 = inflate_opts(),
+ Opts = Opts0#zlib_opts { output_chunk_size = getBufSize(Z) },
+
+ Result0 = dequeue_next_chunk(Z, Opts),
+ Result1 = exception_on_need_dict(Z, Result0),
+ yield_inflateChunk(Z, Result1).
+
+yield_inflateChunk(_Z, {continue, Output}) ->
+ {more, lists:flatten(Output)};
+yield_inflateChunk(_Z, {finished, Output}) ->
+ lists:flatten(Output).
+
+exception_on_need_dict(Z, {need_dictionary, Adler, Output}) ->
+ Progress = restore_progress(Z, inflate),
+ save_progress(Z, inflate, append_iolist(Progress, Output)),
+ erlang:error({need_dictionary, Adler});
+exception_on_need_dict(Z, {Mark, Output}) ->
+ Progress = restore_progress(Z, inflate),
+ {Mark, append_iolist(Progress, Output)};
+exception_on_need_dict(Z, Output) when is_list(Output); is_binary(Output) ->
+ Progress = restore_progress(Z, inflate),
+ append_iolist(Progress, Output).
+
+-spec safeInflate(Z, Data) -> Result when
+ Z :: zstream(),
+ Data :: iodata(),
+ Result :: {continue, Output :: iolist()} |
+ {finished, Output :: iolist()} |
+ {need_dictionary,
+ Adler32 :: integer(),
+ Output :: iolist()}.
+safeInflate(Z, Data) ->
+ enqueue_input(Z, Data),
+ dequeue_next_chunk(Z, inflate_opts()).
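As a sketch of the intended calling pattern (hypothetical drain/2 helper; dictionary handling omitted): feed the input once, then keep calling safeInflate/2 with an empty iolist until it reports finished:

    drain(Z, Data) ->
        drain_loop(Z, zlib:safeInflate(Z, Data), []).

    drain_loop(Z, {continue, Chunk}, Acc) ->
        drain_loop(Z, zlib:safeInflate(Z, []), [Acc, Chunk]);
    drain_loop(_Z, {finished, Chunk}, Acc) ->
        [Acc, Chunk].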
-spec inflateEnd(Z) -> 'ok' when
Z :: zstream().
inflateEnd(Z) ->
- call(Z, ?INFLATE_END, []).
+ inflateEnd_nif(Z).
+inflateEnd_nif(_Z) ->
+ erlang:nif_error(undef).
-spec setBufSize(Z, Size) -> 'ok' when
Z :: zstream(),
Size :: non_neg_integer().
-setBufSize(Z, Size) ->
- call(Z, ?SET_BUFSZ, <<Size:32>>).
+setBufSize(Z, Size) when is_integer(Size), Size > 16, Size < (1 bsl 24) ->
+ setBufSize_nif(Z, Size);
+setBufSize(_Z, _Size) ->
+ erlang:error(badarg).
+setBufSize_nif(_Z, _Size) ->
+ erlang:nif_error(undef).
--spec getBufSize(Z) -> Size when
- Z :: zstream(),
- Size :: non_neg_integer().
+-spec getBufSize(Z) -> non_neg_integer() when
+ Z :: zstream().
getBufSize(Z) ->
- call(Z, ?GET_BUFSZ, []).
+ getBufSize_nif(Z).
+getBufSize_nif(_Z) ->
+ erlang:nif_error(undef).
-spec crc32(Z) -> CRC when
Z :: zstream(),
CRC :: integer().
crc32(Z) ->
- call(Z, ?CRC32_0, []).
+ crc32_nif(Z).
+crc32_nif(_Z) ->
+ erlang:nif_error(undef).
-spec crc32(Z, Data) -> CRC when
Z :: zstream(),
Data :: iodata(),
CRC :: integer().
-crc32(Z, Data) ->
- call(Z, ?CRC32_1, Data).
+crc32(Z, Data) when is_reference(Z) ->
+ erlang:crc32(Data);
+crc32(_Z, _Data) ->
+ erlang:error(badarg).
-spec crc32(Z, PrevCRC, Data) -> CRC when
Z :: zstream(),
PrevCRC :: integer(),
Data :: iodata(),
CRC :: integer().
-crc32(Z, CRC, Data) ->
- call(Z, ?CRC32_2, [<<CRC:32>>, Data]).
+crc32(Z, CRC, Data) when is_reference(Z) ->
+ erlang:crc32(CRC, Data);
+crc32(_Z, _CRC, _Data) ->
+ erlang:error(badarg).
+
+-spec crc32_combine(Z, CRC1, CRC2, Size2) -> CRC when
+ Z :: zstream(),
+ CRC :: integer(),
+ CRC1 :: integer(),
+ CRC2 :: integer(),
+ Size2 :: integer().
+crc32_combine(Z, CRC1, CRC2, Size2) when is_reference(Z) ->
+ erlang:crc32_combine(CRC1, CRC2, Size2);
+crc32_combine(_Z, _CRC1, _CRC2, _Size2) ->
+ erlang:error(badarg).
-spec adler32(Z, Data) -> CheckSum when
Z :: zstream(),
Data :: iodata(),
CheckSum :: integer().
-adler32(Z, Data) ->
- call(Z, ?ADLER32_1, Data).
+adler32(Z, Data) when is_reference(Z) ->
+ erlang:adler32(Data);
+adler32(_Z, _Data) ->
+ erlang:error(badarg).
-spec adler32(Z, PrevAdler, Data) -> CheckSum when
Z :: zstream(),
PrevAdler :: integer(),
Data :: iodata(),
CheckSum :: integer().
-adler32(Z, Adler, Data) when is_integer(Adler) ->
- call(Z, ?ADLER32_2, [<<Adler:32>>, Data]);
-adler32(_Z, _Adler, _Data) ->
- erlang:error(badarg).
-
--spec crc32_combine(Z, CRC1, CRC2, Size2) -> CRC when
- Z :: zstream(),
- CRC :: integer(),
- CRC1 :: integer(),
- CRC2 :: integer(),
- Size2 :: integer().
-crc32_combine(Z, CRC1, CRC2, Len2)
- when is_integer(CRC1), is_integer(CRC2), is_integer(Len2) ->
- call(Z, ?CRC32_COMBINE, <<CRC1:32, CRC2:32, Len2:32>>);
-crc32_combine(_Z, _CRC1, _CRC2, _Len2) ->
+adler32(Z, Adler, Data) when is_reference(Z) ->
+ erlang:adler32(Adler, Data);
+adler32(_Z, _Adler, _Data) ->
erlang:error(badarg).
-spec adler32_combine(Z, Adler1, Adler2, Size2) -> Adler when
@@ -381,16 +433,11 @@ crc32_combine(_Z, _CRC1, _CRC2, _Len2) ->
Adler1 :: integer(),
Adler2 :: integer(),
Size2 :: integer().
-adler32_combine(Z, Adler1, Adler2, Len2)
- when is_integer(Adler1), is_integer(Adler2), is_integer(Len2) ->
- call(Z, ?ADLER32_COMBINE, <<Adler1:32, Adler2:32, Len2:32>>);
-adler32_combine(_Z, _Adler1, _Adler2, _Len2) ->
+adler32_combine(Z, Adler1, Adler2, Size2) when is_reference(Z) ->
+ erlang:adler32_combine(Adler1, Adler2, Size2);
+adler32_combine(_Z, _Adler1, _Adler2, _Size2) ->
erlang:error(badarg).
--spec getQSize(zstream()) -> non_neg_integer().
-getQSize(Z) ->
- call(Z, ?GET_QSIZE, []).
-
%% compress/uncompress zlib with header
-spec compress(Data) -> Compressed when
Data :: iodata(),
@@ -398,13 +445,13 @@ getQSize(Z) ->
compress(Data) ->
Z = open(),
Bs = try
- deflateInit(Z, default),
- B = deflate(Z, Data, finish),
- deflateEnd(Z),
- B
- after
- close(Z)
- end,
+ deflateInit(Z, default),
+ B = deflate(Z, Data, finish),
+ deflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
iolist_to_binary(Bs).
-spec uncompress(Data) -> Decompressed when
@@ -416,14 +463,14 @@ uncompress(Data) ->
if
Size >= 8 ->
Z = open(),
- Bs = try
- inflateInit(Z),
- B = inflate(Z, Data),
- inflateEnd(Z),
- B
- after
- close(Z)
- end,
+ Bs = try
+ inflateInit(Z),
+ B = inflate(Z, Data),
+ inflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
iolist_to_binary(Bs);
true ->
erlang:error(data_error)
@@ -440,13 +487,13 @@ uncompress(Data) ->
zip(Data) ->
Z = open(),
Bs = try
- deflateInit(Z, default, deflated, -?MAX_WBITS, 8, default),
- B = deflate(Z, Data, finish),
- deflateEnd(Z),
- B
- after
- close(Z)
- end,
+ deflateInit(Z, default, deflated, -?MAX_WBITS, 8, default),
+ B = deflate(Z, Data, finish),
+ deflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
iolist_to_binary(Bs).
-spec unzip(Data) -> Decompressed when
@@ -455,28 +502,28 @@ zip(Data) ->
unzip(Data) ->
Z = open(),
Bs = try
- inflateInit(Z, -?MAX_WBITS),
- B = inflate(Z, Data),
- inflateEnd(Z),
- B
- after
- close(Z)
- end,
+ inflateInit(Z, -?MAX_WBITS),
+ B = inflate(Z, Data),
+ inflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
iolist_to_binary(Bs).
-
+
-spec gzip(Data) -> Compressed when
Data :: iodata(),
Compressed :: binary().
gzip(Data) ->
Z = open(),
Bs = try
- deflateInit(Z, default, deflated, 16+?MAX_WBITS, 8, default),
- B = deflate(Z, Data, finish),
- deflateEnd(Z),
- B
- after
- close(Z)
- end,
+ deflateInit(Z, default, deflated, 16+?MAX_WBITS, 8, default),
+ B = deflate(Z, Data, finish),
+ deflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
iolist_to_binary(Bs).
-spec gunzip(Data) -> Decompressed when
@@ -485,92 +532,150 @@ gzip(Data) ->
gunzip(Data) ->
Z = open(),
Bs = try
- inflateInit(Z, 16+?MAX_WBITS),
- B = inflate(Z, Data),
- inflateEnd(Z),
- B
- after
- close(Z)
- end,
+ inflateInit(Z, 16+?MAX_WBITS),
+ B = inflate(Z, Data),
+ inflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
iolist_to_binary(Bs).
--spec collect(zstream()) -> iolist().
-collect(Z) ->
- collect(Z, []).
-
--spec collect(zstream(), iolist()) -> iolist().
-collect(Z, Acc) ->
- receive
- {Z, {data, Bin}} ->
- collect(Z, [Bin|Acc])
- after 0 ->
- reverse(Acc)
+-spec dequeue_all_chunks(Z, Opts) -> Result when
+ Z :: zstream(),
+ Opts :: #zlib_opts{},
+ Result :: {need_dictionary, integer(), iolist()} |
+ iolist().
+dequeue_all_chunks(Z, Opts) ->
+ dequeue_all_chunks_1(Z, Opts, []).
+dequeue_all_chunks_1(Z, Opts, Output) ->
+ case dequeue_next_chunk(Z, Opts) of
+ {need_dictionary, _, _} = NeedDict ->
+ NeedDict;
+ {continue, Chunk} ->
+ dequeue_all_chunks_1(Z, Opts, append_iolist(Output, Chunk));
+ {finished, Chunk} ->
+ append_iolist(Output, Chunk)
end.
--spec flush(zstream()) -> 'ok'.
-flush(Z) ->
- receive
- {Z, {data,_}} ->
- flush(Z)
- after 0 ->
- ok
+-spec dequeue_next_chunk(Z, Opts) -> Result when
+ Z :: zstream(),
+ Opts :: #zlib_opts{},
+ Result :: {need_dictionary, integer(), iolist()} |
+ {continue, iolist()} |
+ {finished, iolist()}.
+dequeue_next_chunk(Z, Opts) ->
+ Method = Opts#zlib_opts.method,
+ IChSz = Opts#zlib_opts.input_chunk_size,
+ OChSz = Opts#zlib_opts.output_chunk_size,
+ Flush = Opts#zlib_opts.flush,
+ Method(Z, IChSz, OChSz, Flush).
+
+-spec append_iolist(IO, D) -> iolist() when
+ IO :: iodata(),
+ D :: iodata().
+append_iolist([], D) when is_list(D) -> D;
+append_iolist([], D) -> [D];
+append_iolist(IO, []) -> IO;
+append_iolist(IO, [D]) -> [IO, D];
+append_iolist(IO, D) -> [IO, D].
+
+%% inflate/2 and friends are documented as throwing an error on Z_NEED_DICT
+%% rather than simply returning something to that effect, and deflateParams/3
+%% may flush behind the scenes. This requires us to stow away our current
+%% progress in the handle and resume from that point on our next call.
+%%
+%% Generally speaking this is either a refc binary or nothing at all, so it's
+%% pretty cheap.
+
+-spec save_progress(Z, Kind, Output) -> ok when
+ Z :: zstream(),
+ Kind :: inflate | deflate,
+ Output :: iolist().
+save_progress(Z, Kind, Output) ->
+ ok = setStash_nif(Z, {Kind, Output}).
+
+-spec restore_progress(Z, Kind) -> iolist() when
+ Z :: zstream(),
+ Kind :: inflate | deflate.
+restore_progress(Z, Kind) ->
+ case getStash_nif(Z) of
+ {ok, {Kind, Output}} ->
+ ok = clearStash_nif(Z),
+ Output;
+ empty ->
+ []
end.
-
-arg_flush(none) -> ?Z_NO_FLUSH;
+
+-spec clearStash_nif(Z) -> ok when
+ Z :: zstream().
+clearStash_nif(_Z) ->
+ erlang:nif_error(undef).
+
+-spec setStash_nif(Z, Term) -> ok when
+ Z :: zstream(),
+ Term :: term().
+setStash_nif(_Z, _Term) ->
+ erlang:nif_error(undef).
+
+-spec getStash_nif(Z) -> {ok, term()} | empty when
+ Z :: zstream().
+getStash_nif(_Z) ->
+ erlang:nif_error(undef).
+
+%% The 'proplists' module isn't preloaded so we can't rely on its existence.
+proplist_get_value([], _Name, DefVal) -> DefVal;
+proplist_get_value([{Name, Value} | _Opts], Name, _DefVal) -> Value;
+proplist_get_value([_Head | Opts], Name, DefVal) ->
+ proplist_get_value(Opts, Name, DefVal).
+
+arg_flush(none) -> ?Z_NO_FLUSH;
%% ?Z_PARTIAL_FLUSH is deprecated in zlib -- deliberately not included.
-arg_flush(sync) -> ?Z_SYNC_FLUSH;
-arg_flush(full) -> ?Z_FULL_FLUSH;
-arg_flush(finish) -> ?Z_FINISH;
-arg_flush(_) -> erlang:error(badarg).
+arg_flush(sync) -> ?Z_SYNC_FLUSH;
+arg_flush(full) -> ?Z_FULL_FLUSH;
+arg_flush(finish) -> ?Z_FINISH;
+arg_flush(_) -> erlang:error(bad_flush_mode).
arg_level(none) -> ?Z_NO_COMPRESSION;
arg_level(best_speed) -> ?Z_BEST_SPEED;
arg_level(best_compression) -> ?Z_BEST_COMPRESSION;
arg_level(default) -> ?Z_DEFAULT_COMPRESSION;
arg_level(Level) when is_integer(Level), Level >= 0, Level =< 9 -> Level;
-arg_level(_) -> erlang:error(badarg).
-
+arg_level(_) -> erlang:error(bad_compression_level).
+
arg_strategy(filtered) -> ?Z_FILTERED;
arg_strategy(huffman_only) -> ?Z_HUFFMAN_ONLY;
arg_strategy(rle) -> ?Z_RLE;
arg_strategy(default) -> ?Z_DEFAULT_STRATEGY;
-arg_strategy(_) -> erlang:error(badarg).
+arg_strategy(_) -> erlang:error(bad_compression_strategy).
arg_method(deflated) -> ?Z_DEFLATED;
-arg_method(_) -> erlang:error(badarg).
+arg_method(_) -> erlang:error(bad_compression_method).
-spec arg_bitsz(zwindowbits()) -> zwindowbits().
arg_bitsz(Bits) when is_integer(Bits) andalso
- ((8 =< Bits andalso Bits < 48) orelse
- (-15 =< Bits andalso Bits =< -8)) ->
+ ((8 =< Bits andalso Bits < 48) orelse
+ (-15 =< Bits andalso Bits =< -8)) ->
Bits;
-arg_bitsz(_) -> erlang:error(badarg).
+arg_bitsz(_) -> erlang:error(bad_windowbits).
-spec arg_mem(zmemlevel()) -> zmemlevel().
arg_mem(Level) when is_integer(Level), 1 =< Level, Level =< 9 -> Level;
-arg_mem(_) -> erlang:error(badarg).
-
-call(Z, Cmd, Arg) ->
- try port_control(Z, Cmd, Arg) of
- [0|Res] -> list_to_atom(Res);
- [1|Res] ->
- flush(Z),
- erlang:error(list_to_atom(Res));
- [2,A,B,C,D] ->
- (A bsl 24)+(B bsl 16)+(C bsl 8)+D;
- [3,A,B,C,D] ->
- erlang:error({need_dictionary,(A bsl 24)+(B bsl 16)+(C bsl 8)+D});
- [4, _, _, _, _] ->
- inflate_has_more
- catch
- error:badarg -> %% Rethrow loses port_control from stacktrace.
- erlang:error(badarg)
- end.
+arg_mem(_) -> erlang:error(bad_memlevel).
-reverse(X) ->
- reverse(X, []).
+-spec enqueue_input(Z, IOData) -> ok when
+ Z :: zstream(),
+ IOData :: iodata().
+enqueue_input(Z, IOData) ->
+ enqueue_input_1(Z, erlang:iolist_to_iovec(IOData)).
+
+enqueue_input_1(_Z, []) ->
+ ok;
+enqueue_input_1(Z, IOVec) ->
+ case enqueue_nif(Z, IOVec) of
+ {continue, Remainder} -> enqueue_input_1(Z, Remainder);
+ ok -> ok
+ end.
-reverse([H|T], Y) ->
- reverse(T, [H|Y]);
-reverse([], X) ->
- X.
+enqueue_nif(_Z, _IOVec) ->
+    erlang:nif_error(undef).
\ No newline at end of file
diff --git a/erts/test/system_smoke.spec b/erts/test/system_smoke.spec
index 99092c1dab..bcb727b6d9 100644
--- a/erts/test/system_smoke.spec
+++ b/erts/test/system_smoke.spec
@@ -1,4 +1,3 @@
-{suites,"../system_test",[ethread_SUITE]}.
{cases,"../system_test",otp_SUITE,
[undefined_functions,
deprecated_not_in_obsolete,
diff --git a/erts/vsn.mk b/erts/vsn.mk
index 05f3b4364e..8ed3993177 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
#
-VSN = 9.0
+VSN = 9.0.4
# Port number 4365 in 4.2
# Port number 4366 in 4.3