Diffstat (limited to 'erts')
-rw-r--r--  erts/autoconf/vxworks/sed.general | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_cpu32 | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_ppc32 | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_ppc603 | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_ppc603_nolongcall | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_ppc860 | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_simlinux | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_simso | 2
-rw-r--r--  erts/autoconf/vxworks/sed.vxworks_sparc | 2
-rw-r--r--  erts/configure.in | 5
-rw-r--r--  erts/doc/src/absform.xml | 4
-rw-r--r--  erts/doc/src/erl_nif.xml | 30
-rw-r--r--  erts/doc/src/escript.xml | 26
-rw-r--r--  erts/doc/src/notes.xml | 74
-rw-r--r--  erts/doc/src/time_correction.xml | 2
-rw-r--r--  erts/emulator/Makefile.in | 2
-rw-r--r--  erts/emulator/beam/dist.c | 342
-rw-r--r--  erts/emulator/beam/dist.h | 8
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 162
-rw-r--r--  erts/emulator/beam/erl_db.c | 40
-rw-r--r--  erts/emulator/beam/erl_db.h | 2
-rw-r--r--  erts/emulator/beam/erl_db_catree.c | 2174
-rw-r--r--  erts/emulator/beam/erl_db_catree.h | 91
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 914
-rw-r--r--  erts/emulator/beam/erl_db_tree_util.h | 151
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 29
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 3
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 54
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 4
-rw-r--r--  erts/emulator/beam/erl_nif.h | 2
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 80
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 7
-rw-r--r--  erts/emulator/beam/erl_port.h | 10
-rw-r--r--  erts/emulator/beam/erl_process.c | 2
-rw-r--r--  erts/emulator/beam/external.c | 80
-rw-r--r--  erts/emulator/beam/external.h | 2
-rw-r--r--  erts/emulator/beam/global.h | 2
-rw-r--r--  erts/emulator/beam/io.c | 34
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 685
-rw-r--r--  erts/emulator/pcre/LICENCE | 93
-rw-r--r--  erts/emulator/pcre/README.pcre_update.md | 6
-rw-r--r--  erts/emulator/pcre/local_config.h | 2
-rw-r--r--  erts/emulator/pcre/pcre-8.41.tar.bz2 | bin 1561874 -> 0 bytes
-rw-r--r--  erts/emulator/pcre/pcre-8.42.tar.bz2 | bin 0 -> 1570171 bytes
-rw-r--r--  erts/emulator/pcre/pcre.h | 12
-rw-r--r--  erts/emulator/pcre/pcre_chartables.c | 2
-rw-r--r--  erts/emulator/pcre/pcre_compile.c | 2
-rw-r--r--  erts/emulator/pcre/pcre_dfa_exec.c | 4
-rw-r--r--  erts/emulator/pcre/pcre_exec.c | 8
-rw-r--r--  erts/emulator/pcre/pcre_jit_compile.c | 407
-rw-r--r--  erts/emulator/pcre/pcre_latin_1_table.c | 3
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 78
-rw-r--r--  erts/emulator/sys/common/erl_osenv.c | 2
-rw-r--r--  erts/emulator/sys/unix/erl_child_setup.c | 11
-rw-r--r--  erts/emulator/sys/unix/sys.c | 2
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 2
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 22
-rw-r--r--  erts/emulator/test/node_container_SUITE.erl | 30
-rw-r--r--  erts/emulator/test/ref_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 6
-rw-r--r--  erts/preloaded/ebin/prim_file.beam | bin 27800 -> 27780 bytes
-rw-r--r--  erts/preloaded/ebin/prim_inet.beam | bin 77932 -> 79028 bytes
-rw-r--r--  erts/preloaded/src/erts.app.src | 2
-rw-r--r--  erts/preloaded/src/prim_file.erl | 2
-rw-r--r--  erts/preloaded/src/prim_inet.erl | 85
-rw-r--r--  erts/vsn.mk | 2
66 files changed, 4783 insertions, 1041 deletions
diff --git a/erts/autoconf/vxworks/sed.general b/erts/autoconf/vxworks/sed.general
index a30eb51169..0e99b4dba4 100644
--- a/erts/autoconf/vxworks/sed.general
+++ b/erts/autoconf/vxworks/sed.general
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_cpu32 b/erts/autoconf/vxworks/sed.vxworks_cpu32
index 961adc4104..26e4f4c7ad 100644
--- a/erts/autoconf/vxworks/sed.vxworks_cpu32
+++ b/erts/autoconf/vxworks/sed.vxworks_cpu32
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_ppc32 b/erts/autoconf/vxworks/sed.vxworks_ppc32
index 1e2e760abc..44697aadc2 100644
--- a/erts/autoconf/vxworks/sed.vxworks_ppc32
+++ b/erts/autoconf/vxworks/sed.vxworks_ppc32
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2006-2016. All Rights Reserved.
+# Copyright Ericsson AB 2006-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_ppc603 b/erts/autoconf/vxworks/sed.vxworks_ppc603
index 5beaae4fc9..4fdfd51273 100644
--- a/erts/autoconf/vxworks/sed.vxworks_ppc603
+++ b/erts/autoconf/vxworks/sed.vxworks_ppc603
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2000-2016. All Rights Reserved.
+# Copyright Ericsson AB 2000-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_ppc603_nolongcall b/erts/autoconf/vxworks/sed.vxworks_ppc603_nolongcall
index 2c2f2fa372..d86876e90e 100644
--- a/erts/autoconf/vxworks/sed.vxworks_ppc603_nolongcall
+++ b/erts/autoconf/vxworks/sed.vxworks_ppc603_nolongcall
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_ppc860 b/erts/autoconf/vxworks/sed.vxworks_ppc860
index 71cf887476..a5c4c2d5c3 100644
--- a/erts/autoconf/vxworks/sed.vxworks_ppc860
+++ b/erts/autoconf/vxworks/sed.vxworks_ppc860
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_simlinux b/erts/autoconf/vxworks/sed.vxworks_simlinux
index 06b1847602..1a2bbd6236 100644
--- a/erts/autoconf/vxworks/sed.vxworks_simlinux
+++ b/erts/autoconf/vxworks/sed.vxworks_simlinux
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2008-2016. All Rights Reserved.
+# Copyright Ericsson AB 2008-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_simso b/erts/autoconf/vxworks/sed.vxworks_simso
index 07606cad80..8d15e87a1b 100644
--- a/erts/autoconf/vxworks/sed.vxworks_simso
+++ b/erts/autoconf/vxworks/sed.vxworks_simso
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2005-2016. All Rights Reserved.
+# Copyright Ericsson AB 2005-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/autoconf/vxworks/sed.vxworks_sparc b/erts/autoconf/vxworks/sed.vxworks_sparc
index 59431fddf9..118b01d16d 100644
--- a/erts/autoconf/vxworks/sed.vxworks_sparc
+++ b/erts/autoconf/vxworks/sed.vxworks_sparc
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/erts/configure.in b/erts/configure.in
index 2d9df13844..d211b81878 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -561,7 +561,8 @@ if test "X$PROFILE_INSTR_GENERATE" = "Xtrue"; then
PROFILE_INSTR_USE=false])
rm -f default.profdata
fi],
- [])
+ [],
+ [AC_MSG_NOTICE([Disabling PGO when cross-compiling])])
rm -f *.profraw
CFLAGS=$saved_CFLAGS;
fi
@@ -582,7 +583,7 @@ AC_MSG_CHECKING([whether to do PGO of erts])
if test $enable_pgo = no; then
AC_MSG_RESULT([no, disabled by user])
elif test $CROSS_COMPILING = yes; then
- if $enable_pgo = yes; then
+ if test $enable_pgo = yes; then
AC_MSG_ERROR(cannot use PGO when cross-compiling)
else
AC_MSG_RESULT([no, cross compiling])
diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml
index f29bb7b8f9..d77d989057 100644
--- a/erts/doc/src/absform.xml
+++ b/erts/doc/src/absform.xml
@@ -811,7 +811,9 @@
</item>
<item>
<p>If T is the empty list type <c>[]</c>, then Rep(T) =
- <c>{type,Line,nil,[]}</c>.</p>
+ <c>{type,Line,nil,[]}</c>, that is, the empty list type
+ <c>[]</c> cannot be distinguished from the predefined type
+ <c>nil()</c>.</p>
</item>
<item>
<p>If T is a fun type <c>fun()</c>, then Rep(T) =
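
As noted in the added text above, the empty list type [] and the predefined type nil() share a single representation in the abstract format. A minimal shell sketch confirming this (the exact annotation inside the forms depends on the OTP version):

    %% Both spellings produce {type,Anno,nil,[]} in the abstract format.
    {ok, Toks1, _} = erl_scan:string("-type t1() :: []."),
    {ok, {attribute, _, type, {t1, Rep1, []}}} = erl_parse:parse_form(Toks1).
    {ok, Toks2, _} = erl_scan:string("-type t2() :: nil()."),
    {ok, {attribute, _, type, {t2, Rep2, []}}} = erl_parse:parse_form(Toks2).
    %% Rep1 and Rep2 are both {type, Anno, nil, []}, so the two spellings
    %% cannot be told apart once the code has been parsed.
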
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index a20b8ee884..a5826307f7 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -3037,7 +3037,8 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
<p>Argument <c>mode</c> describes the type of events to wait for. It can be
<c>ERL_NIF_SELECT_READ</c>, <c>ERL_NIF_SELECT_WRITE</c> or a bitwise
OR combination to wait for both. It can also be <c>ERL_NIF_SELECT_STOP</c>
- which is described further below. When a read or write event is triggered,
+ or <c>ERL_NIF_SELECT_CANCEL</c> which are described further
+ below. When a read or write event is triggered,
a notification message like this is sent to the process identified by
<c>pid</c>:</p>
<code type="none">{select, Obj, Ref, ready_input | ready_output}</code>
@@ -3058,13 +3059,21 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
<p>The notifications are one-shot only. To receive further notifications of the same
type (read or write), repeated calls to <c>enif_select</c> must be made
after receiving each notification.</p>
+ <p><c>ERL_NIF_SELECT_CANCEL</c> can be used to cancel previously
+ selected events. It must be used in a bitwise OR combination with
+ <c>ERL_NIF_SELECT_READ</c> and/or <c>ERL_NIF_SELECT_WRITE</c> to
+ indicate which type of event to cancel. The return value will
+ tell if the event was actually cancelled or if a notification may
+ already have been sent.</p>
<p>Use <c>ERL_NIF_SELECT_STOP</c> as <c>mode</c> in order to safely
close an event object that has been passed to <c>enif_select</c>. The
<seealso marker="#ErlNifResourceStop"><c>stop</c></seealso> callback
of the resource <c>obj</c> will be called when it is safe to close
the event object. This safe way of closing event objects must be used
- even if all notifications have been received and no further calls to
- <c>enif_select</c> have been made.</p>
+ even if all notifications have been received (or cancelled) and no
+ further calls to <c>enif_select</c> have been made.
+ <c>ERL_NIF_SELECT_STOP</c> will first cancel any selected events
+ before it calls or schedules the <c>stop</c> callback.</p>
<p>The first call to <c>enif_select</c> for a specific OS <c>event</c> will establish
a relation between the event object and the containing resource. All subsequent calls
for an <c>event</c> must pass its containing resource as argument
@@ -3086,7 +3095,15 @@ enif_map_iterator_destroy(env, &amp;iter);</code>
<item>The stop callback was called directly by <c>enif_select</c>.</item>
<tag><c>ERL_NIF_SELECT_STOP_SCHEDULED</c></tag>
<item>The stop callback was scheduled to run on some other thread
- or later by this thread.</item>
+ or later by this thread.</item>
+ <tag><c>ERL_NIF_SELECT_READ_CANCELLED</c></tag>
+ <item>A read event was cancelled by <c>ERL_NIF_SELECT_CANCEL</c> or
+ <c>ERL_NIF_SELECT_STOP</c> and is guaranteed not to generate a
+ <c>ready_input</c> notification message.</item>
+ <tag><c>ERL_NIF_SELECT_WRITE_CANCELLED</c></tag>
+ <item>A write event was cancelled by <c>ERL_NIF_SELECT_CANCEL</c> or
+ <c>ERL_NIF_SELECT_STOP</c> and is guaranteed not to generate a
+ <c>ready_output</c> notification message.</item>
</taglist>
<p>Returns a negative value if the call failed where the following bits can be set:</p>
<taglist>
@@ -3112,6 +3129,11 @@ if (retval &amp; ERL_NIF_SELECT_STOP_CALLED) {
}
</code>
</note>
+ <note><p>The mode flag <c>ERL_NIF_SELECT_CANCEL</c> and the return flags
+ <c>ERL_NIF_SELECT_READ_CANCELLED</c> and
+ <c>ERL_NIF_SELECT_WRITE_CANCELLED</c> were introduced in erts-11.0
+ (OTP-22.0).</p>
+ </note>
</desc>
</func>
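
The select notification described above is consumed by an ordinary receive on the Erlang side. The sketch below assumes a hypothetical NIF module my_nif whose read_select/2 arms a read select (enif_select with ERL_NIF_SELECT_READ) and whose cancel_read/1 calls enif_select with ERL_NIF_SELECT_CANCEL | ERL_NIF_SELECT_READ; it only illustrates the one-shot re-arming and cancellation flow, not a real API:

    %% Hypothetical Erlang side of a resource driven by enif_select().
    wait_for_input(Obj, Ref) ->
        ok = my_nif:read_select(Obj, Ref),      %% arm a one-shot read select
        receive
            {select, Obj, Ref, ready_input} ->
                %% Notifications are one-shot; select again to keep receiving.
                wait_for_input(Obj, Ref)
        after 5000 ->
            %% Cancel the pending read select. If the C side saw
            %% ERL_NIF_SELECT_READ_CANCELLED in the return value, no
            %% ready_input message will arrive for this select.
            ok = my_nif:cancel_read(Obj)
        end.
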
diff --git a/erts/doc/src/escript.xml b/erts/doc/src/escript.xml
index 9b0d42185e..be1664b39f 100644
--- a/erts/doc/src/escript.xml
+++ b/erts/doc/src/escript.xml
@@ -4,7 +4,7 @@
<comref>
<header>
<copyright>
- <year>2007</year><year>2017</year>
+ <year>2007</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -155,9 +155,12 @@ io:setopts([{encoding, unicode}])</code>
for example:</p>
<pre>
halt(1).</pre>
- <p>To retrieve the pathname of the script, call
- <seealso marker="#script_name_0">
- <c>escript:script_name()</c></seealso> from your script
+ <p>
+ To retrieve the pathname of the script, call
+ <seealso marker="#script_name-0">
+ <c>escript:script_name()</c>
+ </seealso>
+ from your script
(the pathname is usually, but not always, absolute).</p>
<p>If the file contains source code (as in the example above),
it is processed by the
@@ -229,6 +232,7 @@ $ <input>escript factorial.beam 5</input>
factorial 5 = 120
$ <input>escript factorial.zip 5</input>
factorial 5 = 120</pre>
+ <marker id="create-2"/>
</desc>
</func>
@@ -259,7 +263,7 @@ factorial 5 = 120</pre>
zip:create_option()</seealso>]</v>
</type>
<desc>
- <p><marker id="create_2"></marker>
+ <p>
Creates an escript from a list of sections. The
sections can be specified in any order. An escript begins with an
optional <c>Header</c> followed by a mandatory <c>Body</c>. If
@@ -344,6 +348,7 @@ ok
{{2010,3,2},{0,59,22}},
54,1,0,0,0,0,0},
&lt;&lt;"%% demo.erl\n-module(demo).\n-export([main/1]).\n\n%% Demo\nmain(_Arg"...&gt;&gt;}]}</pre>
+ <marker id="extract-2"/>
</desc>
</func>
@@ -368,9 +373,11 @@ ok
<v>SourceCode = BeamCode = ZipArchive = binary()</v>
</type>
<desc>
- <p><marker id="extract_2"></marker>
- Parses an escript and extracts its sections. This is the reverse
- of <seealso marker="#create_2"><c>create/2</c></seealso>.</p>
+ <p>
+ Parses an escript and extracts its sections.
+ This is the reverse of
+ <seealso marker="#create-2"><c>create/2</c></seealso>.
+ </p>
<p>All sections are returned even if they do not exist in the
escript. If a particular section happens to have the same
value as the default value, the extracted value is set to the
@@ -393,6 +400,7 @@ ok
{ok,[{{archive,&lt;&lt;80,75,3,4,20,0,0,0,8,0,118,7,98,60,105,
152,61,93,107,0,0,0,118,0,...&gt;&gt;}
{emu_args,undefined}]}</pre>
+ <marker id="script_name-0"/>
</desc>
</func>
@@ -403,7 +411,7 @@ ok
<v>File = filename()</v>
</type>
<desc>
- <p><marker id="script_name_0"></marker>
+ <p>
Returns the name of the escript that is executed.
If the function is invoked outside the context
of an escript, the behavior is undefined.</p>
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 9619a953f4..56e2818a1c 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -31,6 +31,64 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 10.0.8</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ As of ERTS version 10.0 (OTP 21.0) the
+ <c>erl_child_setup</c> program, which creates port
+ programs, ignores <c>TERM</c> signals. This setting was
+ unintentionally inherited by port programs. Handling of
+ <c>TERM</c> signals in port programs has now been
+ restored to the default behavior. That is, terminate the
+ process.</p>
+ <p>
+ Own Id: OTP-15289 Aux Id: ERIERL-235, OTP-14943, ERL-576 </p>
+ </item>
+ <item>
+ <p>
+ The fix made for OTP-15279 in erts-10.07 (OTP-21.0.8) was
+ not complete. It could cause a new connection attempt to
+ be incorrectly aborted in certain cases. This fix will
+ amend that flaw.</p>
+ <p>
+ Own Id: OTP-15296 Aux Id: OTP-15279, ERIERL-226 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 10.0.7</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A process could get stuck in an infinite rescheduling
+ loop between normal and dirty schedulers. This bug was
+ introduced in ERTS version 10.0.</p>
+ <p>
+ Thanks to Maxim Fedorov for finding and fixing this
+ issue.</p>
+ <p>
+ Own Id: OTP-15275 Aux Id: PR-1943 </p>
+ </item>
+ <item>
+ <p>
+ Garbage collection of a distribution entry could cause an
+ emulator crash if <c>net_kernel</c> had not brought
+ previous connection attempts on it down properly.</p>
+ <p>
+ Own Id: OTP-15279 Aux Id: ERIERL-226 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 10.0.6</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -2618,6 +2676,22 @@
</section>
+<section><title>Erts 8.3.5.6</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixed small memory leak that could occur when sending to
+ a terminating port.</p>
+ <p>
+ Own Id: OTP-14609 Aux Id: ERIERL-238 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 8.3.5.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/erts/doc/src/time_correction.xml b/erts/doc/src/time_correction.xml
index 53b555387c..a9f06d8a7d 100644
--- a/erts/doc/src/time_correction.xml
+++ b/erts/doc/src/time_correction.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>1999</year><year>2016</year>
+ <year>1999</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 054692819e..eeb9577a33 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -874,7 +874,7 @@ RUN_OBJS += \
$(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o \
$(OBJDIR)/erl_ptab.o $(OBJDIR)/erl_map.o \
$(OBJDIR)/erl_msacc.o $(OBJDIR)/erl_lock_flags.o \
- $(OBJDIR)/erl_io_queue.o
+ $(OBJDIR)/erl_io_queue.o $(OBJDIR)/erl_db_catree.o
LTTNG_OBJS = $(OBJDIR)/erlang_lttng.o
NIF_OBJS = \
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 9232781687..15642e1669 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -116,11 +116,12 @@ static Export *dist_ctrl_put_data_trap;
/* forward declarations */
-static void clear_dist_entry(DistEntry*);
static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy);
static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
static void init_nodes_monitors(void);
static Sint abort_connection(DistEntry* dep, Uint32 conn_id);
+static ErtsDistOutputBuf* clear_de_out_queues(DistEntry*);
+static void free_de_out_queues(DistEntry*, ErtsDistOutputBuf*);
static erts_atomic_t no_caches;
static erts_atomic_t no_nodes;
@@ -556,7 +557,10 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
}
}
else { /* Call from distribution controller (port/process) */
- ErtsMonLnkDist *mld;
+ ErtsMonLnkDist *mld;
+ ErtsAtomCache *cache;
+ ErtsProcList *suspendees;
+ ErtsDistOutputBuf *obuf;
Uint32 flags;
erts_atomic_set_mb(&dep->dist_cmd_scheduled, 1);
@@ -586,6 +590,22 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
nodename = dep->sysname;
flags = dep->flags;
+ erts_atomic_set_nob(&dep->input_handler, (erts_aint_t) NIL);
+ cache = dep->cache;
+ dep->cache = NULL;
+
+ erts_mtx_lock(&dep->qlock);
+
+ erts_atomic64_set_nob(&dep->in, 0);
+ erts_atomic64_set_nob(&dep->out, 0);
+
+ obuf = clear_de_out_queues(dep);
+ suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
+
+ erts_mtx_unlock(&dep->qlock);
+ erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
+ dep->send = NULL;
+
erts_set_dist_entry_not_connected(dep);
erts_de_rwunlock(dep);
@@ -599,7 +619,13 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
? am_connection_closed
: reason));
- clear_dist_entry(dep);
+ erts_resume_processes(suspendees);
+
+ delete_cache(cache);
+
+ free_de_out_queues(dep, obuf);
+ if (dep->transcode_ctx)
+ transcode_free_ctx(dep);
}
dec_no_nodes();
@@ -730,41 +756,6 @@ static void free_de_out_queues(DistEntry* dep, ErtsDistOutputBuf *obuf)
}
}
-static void clear_dist_entry(DistEntry *dep)
-{
- ErtsAtomCache *cache;
- ErtsProcList *suspendees;
- ErtsDistOutputBuf *obuf;
-
- erts_de_rwlock(dep);
- erts_atomic_set_nob(&dep->input_handler,
- (erts_aint_t) NIL);
- cache = dep->cache;
- dep->cache = NULL;
-
- erts_mtx_lock(&dep->qlock);
-
- erts_atomic64_set_nob(&dep->in, 0);
- erts_atomic64_set_nob(&dep->out, 0);
-
- obuf = clear_de_out_queues(dep);
- dep->state = ERTS_DE_STATE_IDLE;
- suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
-
- erts_mtx_unlock(&dep->qlock);
- erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
- dep->send = NULL;
- erts_de_rwunlock(dep);
-
- erts_resume_processes(suspendees);
-
- delete_cache(cache);
-
- free_de_out_queues(dep, obuf);
- if (dep->transcode_ctx)
- transcode_free_ctx(dep);
-}
-
int erts_dsend_context_dtor(Binary* ctx_bin)
{
ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
@@ -859,7 +850,7 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
DeclareTmpHeapNoproc(ctl_heap,6);
int res;
- if (~dsdp->dep->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
/*
* Receiver does not support DOP_MONITOR_P_EXIT (see dsig_send_monitor)
*/
@@ -887,7 +878,7 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
DeclareTmpHeapNoproc(ctl_heap,5);
int res;
- if (~dsdp->dep->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
/*
* Receiver does not support DOP_MONITOR_P.
* Just avoid sending it and by doing that reduce this monitor
@@ -918,7 +909,7 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
DeclareTmpHeapNoproc(ctl_heap,5);
int res;
- if (~dsdp->dep->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
/*
* Receiver does not support DOP_DEMONITOR_P (see dsig_send_monitor)
*/
@@ -938,7 +929,7 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
static int can_send_seqtrace_token(ErtsSendContext* ctx, Eterm token) {
Eterm label;
- if (ctx->dep->flags & DFLAG_BIG_SEQTRACE_LABELS) {
+ if (ctx->dsd.flags & DFLAG_BIG_SEQTRACE_LABELS) {
/* The other end is capable of handling arbitrary seq_trace labels. */
return 1;
}
@@ -999,7 +990,7 @@ erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
send_token = (token != NIL && can_send_seqtrace_token(ctx, token));
- if (ctx->dep->flags & DFLAG_SEND_SENDER) {
+ if (ctx->dsd.flags & DFLAG_SEND_SENDER) {
dist_op = make_small(send_token ?
DOP_SEND_SENDER_TT :
DOP_SEND_SENDER);
@@ -1216,13 +1207,13 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
int erts_net_message(Port *prt,
DistEntry *dep,
+ Uint32 conn_id,
byte *hbuf,
ErlDrvSizeT hlen,
byte *buf,
ErlDrvSizeT len)
{
ErtsDistExternal ede;
- byte *t;
Sint ctl_len;
Eterm arg;
Eterm from, to;
@@ -1240,7 +1231,6 @@ int erts_net_message(Port *prt,
Eterm token_size;
Uint tuple_arity;
int res;
- Uint32 connection_id;
#ifdef ERTS_DIST_MSG_DBG
ErlDrvSizeT orig_len = len;
#endif
@@ -1256,7 +1246,6 @@ int erts_net_message(Port *prt,
return 0;
}
-
ASSERT(hlen == 0);
if (len == 0) { /* HANDLE TICK !!! */
@@ -1269,15 +1258,7 @@ int erts_net_message(Port *prt,
bw(buf, len);
#endif
- if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
- t = buf;
- else {
- /* Skip PASS_THROUGH */
- t = buf+1;
- len--;
- }
-
- res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache, &connection_id);
+ res = erts_prepare_dist_ext(&ede, buf, len, dep, conn_id, dep->cache);
switch (res) {
case ERTS_PREP_DIST_EXT_CLOSED:
@@ -1319,10 +1300,9 @@ int erts_net_message(Port *prt,
PURIFY_MSG("data error");
goto decode_error;
}
- ctl_len = t - buf;
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "<<%s CTL: %T\n", len != orig_len ? "P" : " ", arg);
+ erts_fprintf(stderr, "<< CTL: %T\n", arg);
#endif
if (is_not_tuple(arg) ||
@@ -1770,7 +1750,7 @@ decode_error:
}
data_error:
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- erts_kill_dist_connection(dep, connection_id);
+ erts_kill_dist_connection(dep, conn_id);
ERTS_CHK_NO_PROC_LOCKS;
return -1;
}
@@ -1826,7 +1806,7 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
while (1) {
switch (ctx->phase) {
case ERTS_DSIG_SEND_PHASE_INIT:
- ctx->flags = dsdp->dep->flags;
+ ctx->flags = dsdp->flags;
ctx->c_p = dsdp->proc;
if (!ctx->c_p || dsdp->no_suspend)
@@ -2087,13 +2067,14 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(dist_output)) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", prt->dist_entry->sysname);
+ "%T", dep->sysname);
DTRACE4(dist_output, erts_this_node_sysname, port_str,
remote_str, size);
}
@@ -2149,13 +2130,14 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(dist_outputv)) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", prt->dist_entry->sysname);
+ "%T", dep->sysname);
DTRACE4(dist_outputv, erts_this_node_sysname, port_str,
remote_str, size);
}
@@ -2193,7 +2175,7 @@ erts_dist_command(Port *prt, int initial_reds)
Uint32 flags;
Sint qsize, obufsize = 0;
ErtsDistOutputQueue oq, foq;
- DistEntry *dep = prt->dist_entry;
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
erts_aint32_t sched_flags;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
@@ -2569,11 +2551,12 @@ dist_ctrl_get_data_notification_1(BIF_ALIST_1)
erts_aint32_t qflgs;
erts_aint_t qsize;
Eterm receiver = NIL;
+ Uint32 conn_id;
if (!dep)
BIF_ERROR(BIF_P, EXC_NOTSUP);
- if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep)
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
BIF_ERROR(BIF_P, BADARG);
/*
@@ -2583,6 +2566,11 @@ dist_ctrl_get_data_notification_1(BIF_ALIST_1)
erts_de_rlock(dep);
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
ASSERT(dep->cid == BIF_P->common.id);
qflgs = erts_atomic32_read_acqb(&dep->qflgs);
@@ -2623,6 +2611,7 @@ dist_ctrl_put_data_2(BIF_ALIST_2)
DistEntry *dep;
ErlDrvSizeT size;
Eterm input_handler;
+ Uint32 conn_id;
if (is_binary(BIF_ARG_2))
size = binary_size(BIF_ARG_2);
@@ -2634,7 +2623,7 @@ dist_ctrl_put_data_2(BIF_ALIST_2)
else
BIF_ERROR(BIF_P, BADARG);
- dep = erts_dhandle_to_dist_entry(BIF_ARG_1);
+ dep = erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id);
if (!dep)
BIF_ERROR(BIF_P, BADARG);
@@ -2654,7 +2643,7 @@ dist_ctrl_put_data_2(BIF_ALIST_2)
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- (void) erts_net_message(NULL, dep, NULL, 0, data, size);
+ (void) erts_net_message(NULL, dep, conn_id, NULL, 0, data, size);
/*
* We ignore any decode failures. On fatal failures the
* connection will be taken down by killing the
@@ -2678,13 +2667,18 @@ dist_get_stat_1(BIF_ALIST_1)
Sint64 read, write, pend;
Eterm res, *hp, **hpp;
Uint sz, *szp;
- DistEntry *dep = erts_dhandle_to_dist_entry(BIF_ARG_1);
+ Uint32 conn_id;
+ DistEntry *dep = erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id);
if (!dep)
BIF_ERROR(BIF_P, BADARG);
erts_de_rlock(dep);
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
read = (Sint64) erts_atomic64_read_nob(&dep->in);
write = (Sint64) erts_atomic64_read_nob(&dep->out);
pend = (Sint64) erts_atomic_read_nob(&dep->qsize);
@@ -2715,19 +2709,25 @@ BIF_RETTYPE
dist_ctrl_input_handler_2(BIF_ALIST_2)
{
DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ Uint32 conn_id;
if (!dep)
BIF_ERROR(BIF_P, EXC_NOTSUP);
- if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep)
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
BIF_ERROR(BIF_P, BADARG);
if (is_not_internal_pid(BIF_ARG_2))
BIF_ERROR(BIF_P, BADARG);
+ erts_de_rlock(dep);
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
erts_atomic_set_nob(&dep->input_handler,
(erts_aint_t) BIF_ARG_2);
-
+ erts_de_runlock(dep);
BIF_RET(am_ok);
}
@@ -2741,15 +2741,21 @@ dist_ctrl_get_data_1(BIF_ALIST_1)
Eterm *hp;
ProcBin *pb;
erts_aint_t qsize;
+ Uint32 conn_id;
if (!dep)
BIF_ERROR(BIF_P, EXC_NOTSUP);
- if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep)
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
BIF_ERROR(BIF_P, BADARG);
erts_de_rlock(dep);
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
if (dep->state == ERTS_DE_STATE_EXITING)
goto return_none;
@@ -2836,13 +2842,14 @@ erts_dist_port_not_busy(Port *prt)
{
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(dist_port_not_busy)) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", prt->dist_entry->sysname);
+ "%T", dep->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
port_str, remote_str);
}
@@ -2868,10 +2875,10 @@ static void kill_connection(DistEntry *dep)
}
void
-erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id)
+erts_kill_dist_connection(DistEntry *dep, Uint32 conn_id)
{
erts_de_rwlock(dep);
- if (connection_id == dep->connection_id
+ if (conn_id == dep->connection_id
&& dep->state == ERTS_DE_STATE_CONNECTED) {
kill_connection(dep);
@@ -3207,23 +3214,6 @@ BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
else if (!dep)
goto system_limit; /* Should never happen!!! */
- erts_de_rlock(dep);
- de_locked = -1;
-
- if (dep->state == ERTS_DE_STATE_EXITING) {
- /* Suspend on dist entry waiting for the exit to finish */
- ErtsProcList *plp = erts_proclist_create(BIF_P);
- plp->next = NULL;
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- erts_mtx_lock(&dep->qlock);
- erts_proclist_store_last(&dep->suspended, plp);
- erts_mtx_unlock(&dep->qlock);
- goto yield;
- }
-
- erts_de_runlock(dep);
- de_locked = 0;
-
if (is_internal_pid(BIF_ARG_2)) {
if (BIF_P->common.id == BIF_ARG_2) {
ErtsSetupConnDistCtrl scdc;
@@ -3269,7 +3259,7 @@ BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
hp = HAlloc(BIF_P, 3);
}
else {
- int new;
+ Uint32 conn_id;
pp = erts_id2port_sflgs(BIF_ARG_2,
BIF_P,
@@ -3278,7 +3268,7 @@ BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
erts_de_rwlock(dep);
de_locked = 1;
- if (dep->state == ERTS_DE_STATE_EXITING)
+ if (dep->state != ERTS_DE_STATE_PENDING)
goto badarg;
if (!pp || (erts_atomic32_read_nob(&pp->state)
@@ -3288,49 +3278,39 @@ BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
goto badarg;
- if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep)
- new = 0;
- else {
- if (dep->state != ERTS_DE_STATE_PENDING) {
- if (dep->state == ERTS_DE_STATE_IDLE)
- erts_set_dist_entry_pending(dep);
- else
- goto badarg;
- }
-
- if (pp->dist_entry || is_not_nil(dep->cid))
- goto badarg;
-
- erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+ if (erts_prtsd_get(pp, ERTS_PRTSD_DIST_ENTRY) != NULL
+ || is_not_nil(dep->cid))
+ goto badarg;
- pp->dist_entry = dep;
+ erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
- ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+ erts_prtsd_set(pp, ERTS_PRTSD_DIST_ENTRY, dep);
+ erts_prtsd_set(pp, ERTS_PRTSD_CONN_ID, (void*)(UWord)dep->connection_id);
- dep->send = (pp->drv_ptr->outputv
- ? dist_port_commandv
- : dist_port_command);
- ASSERT(dep->send);
+ ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
- /*
- * Dist-ports do not use the "busy port message queue" functionality, but
- * instead use "busy dist entry" functionality.
- */
- {
- ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
- erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
- }
+ dep->send = (pp->drv_ptr->outputv
+ ? dist_port_commandv
+ : dist_port_command);
+ ASSERT(dep->send);
- setup_connection_epiloge_rwunlock(BIF_P, dep, BIF_ARG_2, flags, version);
- de_locked = 0;
- new = !0;
+ /*
+ * Dist-ports do not use the "busy port message queue" functionality, but
+ * instead use "busy dist entry" functionality.
+ */
+ {
+ ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+ erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
}
- hp = HAlloc(BIF_P, 3 + ERTS_MAGIC_REF_THING_SIZE);
- res = erts_build_dhandle(&hp, &BIF_P->off_heap, dep);
+ conn_id = dep->connection_id;
+ setup_connection_epiloge_rwunlock(BIF_P, dep, BIF_ARG_2, flags, version);
+ de_locked = 0;
+
+ hp = HAlloc(BIF_P, 3 + ERTS_DHANDLE_SIZE);
+ res = erts_build_dhandle(&hp, &BIF_P->off_heap, dep, conn_id);
res_tag = am_ok; /* Connection up */
- if (new)
- dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ dep = NULL; /* inc of refc transferred to port (dist_entry field) */
}
ASSERT(is_value(res) && is_value(res_tag));
@@ -3356,12 +3336,6 @@ BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
return ret;
- yield:
- ERTS_BIF_PREP_YIELD4(ret,
- bif_export[BIF_erts_internal_create_dist_channel_4],
- BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
- goto done;
-
badarg:
ERTS_BIF_PREP_RET(ret, am_badarg);
goto done;
@@ -3383,8 +3357,7 @@ setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
dep->creation = 0;
ASSERT(is_internal_port(ctrlr) || is_internal_pid(ctrlr));
- ASSERT(erts_atomic_read_nob(&dep->qsize) == 0
- || (dep->state == ERTS_DE_STATE_PENDING));
+ ASSERT(dep->state == ERTS_DE_STATE_PENDING);
if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
create_cache(dep);
@@ -3430,37 +3403,20 @@ setup_connection_distctrl(Process *c_p, void *arg, int *redsp, ErlHeapFragment *
DistEntry *dep = scdcp->dep;
int dep_locked = 0;
Eterm *hp;
- erts_aint32_t state;
+ Uint32 conn_id;
if (redsp)
*redsp = 1;
- state = erts_atomic32_read_nob(&c_p->state);
-
- if (state & ERTS_PSFLG_EXITING)
- goto badarg;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
erts_de_rwlock(dep);
dep_locked = !0;
- if (dep->state == ERTS_DE_STATE_EXITING)
- goto badarg;
-
- if (ERTS_PROC_GET_DIST_ENTRY(c_p)) {
- if (dep == ERTS_PROC_GET_DIST_ENTRY(c_p)
- && (c_p->flags & F_DISTRIBUTION)
- && dep->cid == c_p->common.id) {
- goto connected;
- }
+ if (dep->state != ERTS_DE_STATE_PENDING)
goto badarg;
- }
- if (dep->state != ERTS_DE_STATE_PENDING) {
- if (dep->state == ERTS_DE_STATE_IDLE)
- erts_set_dist_entry_pending(dep);
- else
- goto badarg;
- }
+ conn_id = dep->connection_id;
if (is_not_nil(dep->cid))
goto badarg;
@@ -3475,18 +3431,17 @@ setup_connection_distctrl(Process *c_p, void *arg, int *redsp, ErlHeapFragment *
setup_connection_epiloge_rwunlock(c_p, dep, c_p->common.id,
scdcp->flags, scdcp->version);
-connected:
/* we take over previous inc in refc of dep */
if (!bpp) /* called directly... */
- return erts_make_dhandle(c_p, dep);
+ return erts_make_dhandle(c_p, dep, conn_id);
erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
- *bpp = new_message_buffer(ERTS_MAGIC_REF_THING_SIZE);
+ *bpp = new_message_buffer(ERTS_DHANDLE_SIZE);
hp = (*bpp)->mem;
- return erts_build_dhandle(&hp, &(*bpp)->off_heap, dep);
+ return erts_build_dhandle(&hp, &(*bpp)->off_heap, dep, conn_id);
badarg:
@@ -3527,34 +3482,30 @@ BIF_RETTYPE erts_internal_new_connection_1(BIF_ALIST_1)
erts_de_rwlock(dep);
switch (dep->state) {
- case ERTS_DE_STATE_PENDING:
case ERTS_DE_STATE_CONNECTED:
+ case ERTS_DE_STATE_EXITING:
+ case ERTS_DE_STATE_PENDING:
conn_id = dep->connection_id;
break;
case ERTS_DE_STATE_IDLE:
erts_set_dist_entry_pending(dep);
conn_id = dep->connection_id;
break;
- case ERTS_DE_STATE_EXITING:
- conn_id = (dep->connection_id + 1) & ERTS_DIST_CON_ID_MASK;
- break;
default:
erts_exit(ERTS_ABORT_EXIT, "Invalid dep->state (%d)\n", dep->state);
}
erts_de_rwunlock(dep);
- hp = HAlloc(BIF_P, 3 + ERTS_MAGIC_REF_THING_SIZE);
- dhandle = erts_build_dhandle(&hp, &BIF_P->off_heap, dep);
+ hp = HAlloc(BIF_P, ERTS_DHANDLE_SIZE);
+ dhandle = erts_build_dhandle(&hp, &BIF_P->off_heap, dep, conn_id);
erts_deref_dist_entry(dep);
- BIF_RET(TUPLE2(hp, make_small(conn_id), dhandle));
+ BIF_RET(dhandle);
}
-static Sint abort_connection(DistEntry* dep, Uint32 conn_id)
+Sint erts_abort_connection_rwunlock(DistEntry* dep)
{
- erts_de_rwlock(dep);
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
- if (dep->connection_id != conn_id)
- ;
- else if (dep->state == ERTS_DE_STATE_CONNECTED) {
+ if (dep->state == ERTS_DE_STATE_CONNECTED) {
kill_connection(dep);
}
else if (dep->state == ERTS_DE_STATE_PENDING) {
@@ -3583,7 +3534,6 @@ static Sint abort_connection(DistEntry* dep, Uint32 conn_id)
dep->send = NULL;
erts_set_dist_entry_not_connected(dep);
-
erts_de_rwunlock(dep);
schedule_con_monitor_link_cleanup(mld, THE_NON_VALUE,
@@ -3596,42 +3546,37 @@ static Sint abort_connection(DistEntry* dep, Uint32 conn_id)
delete_cache(cache);
free_de_out_queues(dep, obuf);
-
- /*
- * We wait to make DistEntry idle and accept new connection attempts
- * until all is cleared and deallocated. This to get some back pressure
- * against repeated failing connection attempts saturating all CPUs
- * with cleanup jobs.
- */
- erts_de_rwlock(dep);
- ASSERT(dep->state == ERTS_DE_STATE_EXITING);
- dep->state = ERTS_DE_STATE_IDLE;
- erts_de_rwunlock(dep);
return reds;
}
erts_de_rwunlock(dep);
return 0;
}
+static Sint abort_connection(DistEntry *dep, Uint32 conn_id)
+{
+ erts_de_rwlock(dep);
+ if (dep->connection_id == conn_id)
+ return erts_abort_connection_rwunlock(dep);
+ erts_de_rwunlock(dep);
+ return 0;
+}
+
BIF_RETTYPE erts_internal_abort_connection_2(BIF_ALIST_2)
{
DistEntry* dep;
- Eterm* tp;
+ Uint32 conn_id;
+ Sint reds;
- if (is_not_atom(BIF_ARG_1) || is_not_tuple_arity(BIF_ARG_2, 2)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- tp = tuple_val(BIF_ARG_2);
- dep = erts_dhandle_to_dist_entry(tp[2]);
- if (is_not_small(tp[1]) || dep != erts_find_dist_entry(BIF_ARG_1)
+ if (is_not_atom(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ dep = erts_dhandle_to_dist_entry(BIF_ARG_2, &conn_id);
+ if (!dep || dep != erts_find_dist_entry(BIF_ARG_1)
|| dep == erts_this_dist_entry) {
BIF_ERROR(BIF_P, BADARG);
}
- if (dep) {
- Sint reds = abort_connection(dep, unsigned_val(tp[1]));
- BUMP_REDS(BIF_P, reds);
- }
+ reds = abort_connection(dep, conn_id);
+ BUMP_REDS(BIF_P, reds);
BIF_RET(am_true);
}
@@ -3662,14 +3607,13 @@ int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks)
}
/*
- * Send {auto_connect, Node, ConnId, DHandle} to net_kernel
+ * Send {auto_connect, Node, DHandle} to net_kernel
*/
mp = erts_alloc_message_heap(net_kernel, &nk_locks,
- 5 + ERTS_MAGIC_REF_THING_SIZE,
+ 4 + ERTS_DHANDLE_SIZE,
&hp, &ohp);
- dhandle = erts_build_dhandle(&hp, ohp, dep);
- msg = TUPLE4(hp, am_auto_connect, dep->sysname, make_small(conn_id),
- dhandle);
+ dhandle = erts_build_dhandle(&hp, ohp, dep, conn_id);
+ msg = TUPLE3(hp, am_auto_connect, dep->sysname, dhandle);
ERL_MESSAGE_TOKEN(mp) = am_undefined;
erts_queue_proc_message(proc, net_kernel, nk_locks, mp, msg);
erts_proc_unlock(net_kernel, nk_locks);
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index 55204eb83d..845fab229a 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -136,6 +136,7 @@ typedef struct {
Eterm cid;
Eterm connection_id;
int no_suspend;
+ Uint32 flags;
} ErtsDSigData;
#define ERTS_DE_BUSY_LIMIT (1024*1024)
@@ -235,6 +236,7 @@ retry:
dsdp->cid = dep->cid;
dsdp->connection_id = dep->connection_id;
dsdp->no_suspend = no_suspend;
+ dsdp->flags = dep->flags;
if (dspl == ERTS_DSP_NO_LOCK)
erts_de_runlock(dep);
return res;
@@ -254,9 +256,9 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT((erts_atomic32_read_nob(&prt->state)
& ERTS_PORT_SFLGS_DEAD) == 0);
- ASSERT(prt->dist_entry);
- dep = prt->dist_entry;
+ dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ ASSERT(dep);
id = prt->common.id;
}
else {
@@ -400,5 +402,7 @@ extern void erts_kill_dist_connection(DistEntry *dep, Uint32);
extern Uint erts_dist_cache_size(void);
+extern Sint erts_abort_connection_rwunlock(DistEntry *dep);
+
#endif
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 73d327da3e..395be67a90 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -277,75 +277,121 @@ BIF_RETTYPE lists_member_2(BIF_ALIST_2)
BIF_RET2(am_false, CONTEXT_REDS - max_iter/10);
}
-BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
+static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
+ Eterm list_in,
+ Eterm tail_in)
{
- Eterm list;
- Eterm tmp_list;
- Eterm result;
- Eterm* hp;
- Uint n;
- int max_iter;
-
- /*
- * Handle legal and illegal non-lists quickly.
- */
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(BIF_ARG_2);
- } else if (is_not_list(BIF_ARG_1)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
+ static const Uint CELLS_PER_RED = 40;
+
+ Eterm *heap_top, *heap_end;
+ Uint cells_left, max_cells;
+ Eterm list, tail;
+ Eterm lookahead;
+
+ list = list_in;
+ tail = tail_in;
+
+ cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
+ lookahead = list;
+
+ while (cells_left != 0 && is_list(lookahead)) {
+ lookahead = CDR(list_val(lookahead));
+ cells_left--;
}
- /*
- * First use the rest of the remaning heap space.
- */
- list = BIF_ARG_1;
- result = BIF_ARG_2;
- hp = HEAP_TOP(BIF_P);
- n = HeapWordsLeft(BIF_P) / 2;
- while (n != 0 && is_list(list)) {
- Eterm* pair = list_val(list);
- result = CONS(hp, CAR(pair), result);
- list = CDR(pair);
- hp += 2;
- n--;
+ BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);
+
+ if (is_not_list(lookahead) && is_not_nil(lookahead)) {
+ BIF_ERROR(c_p, BADARG);
}
- HEAP_TOP(BIF_P) = hp;
+
+ heap_top = HAlloc(c_p, 2 * (max_cells - cells_left));
+ heap_end = heap_top + 2 * (max_cells - cells_left);
+
+ while (heap_top < heap_end) {
+ Eterm *pair = list_val(list);
+
+ tail = CONS(heap_top, CAR(pair), tail);
+ list = CDR(pair);
+
+ ASSERT(is_list(list) || is_nil(list));
+
+ heap_top += 2;
+ }
+
if (is_nil(list)) {
- BIF_RET(result);
+ BIF_RET(tail);
}
- /*
- * Calculate length of remaining list (up to a suitable limit).
- */
- max_iter = CONTEXT_REDS * 40;
- n = 0;
- tmp_list = list;
- while (max_iter-- > 0 && is_list(tmp_list)) {
- tmp_list = CDR(list_val(tmp_list));
- n++;
+ ASSERT(is_list(tail) && cells_left == 0);
+ BIF_TRAP2(bif_export[BIF_lists_reverse_2], c_p, list, tail);
+}
+
+static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
+ Eterm list_in,
+ Eterm tail_in)
+{
+ static const Uint CELLS_PER_RED = 60;
+
+ Eterm *heap_top, *heap_end;
+ Uint cells_left, max_cells;
+ Eterm list, tail;
+
+ list = list_in;
+ tail = tail_in;
+
+ cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
+
+ ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
+ heap_end = HEAP_LIMIT(c_p) - 2;
+ heap_top = HEAP_TOP(c_p);
+
+ while (heap_top < heap_end && is_list(list)) {
+ Eterm *pair = list_val(list);
+
+ tail = CONS(heap_top, CAR(pair), tail);
+ list = CDR(pair);
+
+ heap_top += 2;
}
- if (is_not_nil(tmp_list) && is_not_list(tmp_list)) {
- goto error;
+
+ cells_left -= (heap_top - heap_end) / 2;
+ BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);
+ HEAP_TOP(c_p) = heap_top;
+
+ if (is_nil(list)) {
+ BIF_RET(tail);
+ } else if (is_list(list)) {
+ ASSERT(is_list(tail));
+
+ if (cells_left > CELLS_PER_RED) {
+ return lists_reverse_alloc(c_p, list, tail);
+ }
+
+ BUMP_ALL_REDS(c_p);
+ BIF_TRAP2(bif_export[BIF_lists_reverse_2], c_p, list, tail);
}
- /*
- * Now do one HAlloc() and continue reversing.
- */
- hp = HAlloc(BIF_P, 2*n);
- while (n != 0 && is_list(list)) {
- Eterm* pair = list_val(list);
- result = CONS(hp, CAR(pair), result);
- list = CDR(pair);
- hp += 2;
- n--;
+ BIF_ERROR(c_p, BADARG);
+}
+
+BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
+{
+ /* Handle legal and illegal non-lists quickly. */
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(BIF_ARG_2);
+ } else if (is_not_list(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
}
- if (is_nil(list)) {
- BIF_RET(result);
- } else {
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP2(bif_export[BIF_lists_reverse_2], BIF_P, list, result);
+
+ /* We build the reversal on the unused part of the heap if possible to save
+ * us the trouble of having to figure out the list size. We fall back to
+ * lists_reverse_alloc when we run out of space. */
+ if (HeapWordsLeft(BIF_P) > 8) {
+ return lists_reverse_onheap(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
+
+ return lists_reverse_alloc(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
BIF_RETTYPE
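
The rewrite above changes only how lists:reverse/2 allocates and yields (build on the unused heap first, fall back to HAlloc, and trap back to itself when reductions run out); the observable semantics are unchanged. For reference:

    %% lists:reverse/2 reverses its first argument onto the second (tail) argument.
    [3, 2, 1, a, b] = lists:reverse([1, 2, 3], [a, b]),
    [a, b] = lists:reverse([], [a, b]).
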
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index c009a3bde8..1df972f4b6 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -90,7 +90,8 @@ enum DbIterSafety {
ITER_SAFE /* No need to fixate at all */
};
# define ITERATION_SAFETY(Proc,Tab) \
- ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
+ ((IS_TREE_TABLE((Tab)->common.status) || IS_CATREE_TABLE((Tab)->common.status) \
+ || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
: (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
@@ -359,6 +360,7 @@ typedef enum {
extern DbTableMethod db_hash;
extern DbTableMethod db_tree;
+extern DbTableMethod db_catree;
int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
@@ -414,6 +416,15 @@ free_dbtable(void *vtb)
tb->common.fixations);
}
#endif
+ if (erts_atomic_read_nob(&tb->common.memory_size) > sizeof(DbTable)) {
+ /* The CA tree implementation use delayed freeing and the DbTable needs to
+ be freed after all other memory blocks that are allocated by the table. */
+ erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
+ (void *) tb,
+ &tb->release.data,
+ sizeof(DbTable));
+ return;
+ }
erts_rwmtx_destroy(&tb->common.rwlock);
erts_mtx_destroy(&tb->common.fixlock);
ASSERT(is_immed(tb->common.heir_data));
@@ -1076,7 +1087,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_element_3);
UseTmpHeap(2,BIF_P);
- if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) {
goto bail_out;
}
if (is_tuple(BIF_ARG_3)) {
@@ -1165,7 +1176,7 @@ do_update_counter(Process *p, DbTable* tb,
UseTmpHeap(5, p);
- if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) {
goto bail_out;
}
if (is_integer(arg3)) { /* Incr */
@@ -1647,15 +1658,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
val = CAR(list_val(list));
if (val == am_bag) {
status |= DB_BAG;
- status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET);
+ status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET);
}
else if (val == am_duplicate_bag) {
status |= DB_DUPLICATE_BAG;
- status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET);
+ status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET);
}
else if (val == am_ordered_set) {
status |= DB_ORDERED_SET;
- status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG);
+ status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_CA_ORDERED_SET);
}
else if (is_tuple(val)) {
Eterm *tp = tuple_val(val);
@@ -1716,7 +1727,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
if (is_not_nil(list)) { /* bad opt or not a well formed list */
BIF_ERROR(BIF_P, BADARG);
}
- if (IS_HASH_TABLE(status)) {
+ if (IS_TREE_TABLE(status) && is_fine_locked && !(status & DB_PRIVATE)) {
+ meth = &db_catree;
+ status |= DB_CA_ORDERED_SET;
+ status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_ORDERED_SET);
+ status |= DB_FINE_LOCKED;
+ }
+ else if (IS_HASH_TABLE(status)) {
meth = &db_hash;
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
@@ -3506,6 +3523,7 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_initialize_hash();
db_initialize_tree();
+ db_initialize_catree();
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
@@ -4114,6 +4132,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_duplicate_bag;
} else if (tb->common.status & DB_ORDERED_SET) {
ret = am_ordered_set;
+ } else if (tb->common.status & DB_CA_ORDERED_SET) {
+ ret = am_ordered_set;
} else { /*TT*/
ASSERT(tb->common.status & DB_BAG);
ret = am_bag;
@@ -4409,6 +4429,12 @@ void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
if(IS_HASH_TABLE(tb->common.status)) {
erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ } else if(IS_CATREE_TABLE(tb->common.status)) {
+ /* erts_lcnt_enable_db_catree_lock_count is not thread safe so
+ the table needs to get locked */
+ db_lock(tb, LCK_WRITE);
+ erts_lcnt_enable_db_catree_lock_count(&tb->catree, enable);
+ db_unlock(tb, LCK_WRITE);
}
}
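
The ets_new_2 change above selects the new CA-tree backend (db_catree) whenever an ordered_set is created with {write_concurrency, true} and is not private. From the Erlang side this is transparent; a sketch with an illustrative table name and terms:

    %% An ordered_set with write_concurrency enabled is now backed by the
    %% contention adapting tree (erl_db_catree.c); the ETS API is unchanged.
    T = ets:new(example_tab, [ordered_set, public, {write_concurrency, true}]),
    true = ets:insert(T, {10, a}),
    true = ets:insert(T, {2, b}),
    [{2, b}] = ets:lookup(T, 2),
    2 = ets:first(T),                    %% keys still traverse in term order
    ordered_set = ets:info(T, type).     %% reported type is still ordered_set
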
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 23975d208f..45d120ac0e 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -66,6 +66,7 @@ typedef struct {
#include "erl_db_util.h" /* Flags */
#include "erl_db_hash.h" /* DbTableHash */
#include "erl_db_tree.h" /* DbTableTree */
+#include "erl_db_catree.h" /* DbTableCATree */
/*TT*/
Uint erts_get_ets_misc_mem_size(void);
@@ -90,6 +91,7 @@ union db_table {
DbTableCommon common; /* Any type of db table */
DbTableHash hash; /* Linear hash array specific data */
DbTableTree tree; /* AVL tree specific data */
+ DbTableCATree catree; /* CA tree specific data */
DbTableRelease release;
/*TT*/
};
diff --git a/erts/emulator/beam/erl_db_catree.c b/erts/emulator/beam/erl_db_catree.c
new file mode 100644
index 0000000000..37a299df35
--- /dev/null
+++ b/erts/emulator/beam/erl_db_catree.c
@@ -0,0 +1,2174 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB and Kjell Winblad 1998-2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Implementation of ETS ordered_set table type with
+ * fine-grained synchronization.
+ *
+ * Author: Kjell Winblad
+ *
+ * This implementation is based on the contention adapting search tree
+ * (CA tree). The CA tree is a concurrent data structure that
+ * dynamically adapts its synchronization granularity based on how
+ * much contention is detected in locks. The following publication
+ * contains a detailed description of CA trees:
+ *
+ * A Contention Adapting Approach to Concurrent Ordered Sets
+ * Journal of Parallel and Distributed Computing, 2018
+ * Kjell Winblad and Konstantinos Sagonas
+ * https://doi.org/10.1016/j.jpdc.2017.11.007
+ *
+ * The following publication may also be interesting as it discusses
+ * how the CA tree can be used as an ETS ordered_set table type
+ * backend:
+ *
+ * More Scalable Ordered Set for ETS Using Adaptation
+ * In Thirteenth ACM SIGPLAN workshop on Erlang (2014)
+ * Kjell Winblad and Konstantinos Sagonas
+ * https://doi.org/10.1145/2633448.2633455
+ *
+ * This implementation of the ordered_set ETS table type is only
+ * activated when the options {write_concurrency, true}, public and
+ * ordered_set are passed to the ets:new/2 function. This
+ * implementation is expected to scale better than the default
+ * implementation (located in "erl_db_tree.c") when concurrent
+ * processes use the following ETS operations to operate on a table:
+ *
+ * delete/2, delete_object/2, first/1, insert/2 (single object),
+ * insert_new/2 (single object), lookup/2, lookup_element/2, member/2,
+ * next/2, take/2 and update_element/3 (single object).
+ *
+ * Currently, the implementation does not have scalable support for
+ * the other operations (e.g., select/2). These operations are handled
+ * by merging all locks so that all terms get protected by a single
+ * lock. This implementation may thus perform worse than the default
+ * implementation in some scenarios. For example, when concurrent
+ * processes access a table with the operations insert/2, delete/2 and
+ * select/2, the insert/2 and delete/2 operations will trigger splits
+ * of locks (to get more fine-grained synchronization) but this will
+ * quickly be undone by the select/2 operation if this operation is
+ * also called frequently.
+ *
+ * The default implementation has a static stack optimization (see
+ * get_static_stack in erl_db_tree.c). This implementation does not
+ * have such an optimization as it induces bad scalability when
+ * concurrent read operations are frequent (they all try to get hold
+ * of the same stack). The default implementation may thus perform
+ * better compared to this implementation in scenarios where the
+ * static stack optimization is useful. One such scenario is when only
+ * one process is accessing the table and this process is traversing
+ * the table with a sequence of next/2 calls.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#define ERTS_WANT_DB_INTERNAL__
+#include "erl_db.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+
+#include "erl_db_catree.h"
+#include "erl_db_tree.h"
+#include "erl_db_tree_util.h"
+
+/*
+** Forward declarations
+*/
+
+static SWord do_free_base_node_cont(DbTableCATree *tb, SWord num_left);
+static SWord do_free_routing_nodes_catree_cont(DbTableCATree *tb, SWord num_left);
+static DbTableCATreeNode *catree_first_base_node_from_free_list(DbTableCATree *tb);
+
+/* Method interface functions */
+static int db_first_catree(Process *p, DbTable *tbl,
+ Eterm *ret);
+static int db_next_catree(Process *p, DbTable *tbl,
+ Eterm key, Eterm *ret);
+static int db_last_catree(Process *p, DbTable *tbl,
+ Eterm *ret);
+static int db_prev_catree(Process *p, DbTable *tbl,
+ Eterm key,
+ Eterm *ret);
+static int db_put_catree(DbTable *tbl, Eterm obj, int key_clash_fail);
+static int db_get_catree(Process *p, DbTable *tbl,
+ Eterm key, Eterm *ret);
+static int db_member_catree(DbTable *tbl, Eterm key, Eterm *ret);
+static int db_get_element_catree(Process *p, DbTable *tbl,
+ Eterm key,int ndex,
+ Eterm *ret);
+static int db_erase_catree(DbTable *tbl, Eterm key, Eterm *ret);
+static int db_erase_object_catree(DbTable *tbl, Eterm object,Eterm *ret);
+static int db_slot_catree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret);
+static int db_select_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, int reversed, Eterm *ret);
+static int db_select_count_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_chunk_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
+ int reversed, Eterm *ret);
+static int db_select_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static int db_select_count_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static int db_select_delete_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_delete_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static int db_select_replace_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_replace_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static int db_take_catree(Process *, DbTable *, Eterm, Eterm *);
+static void db_print_catree(fmtfn_t to, void *to_arg,
+ int show, DbTable *tbl);
+static int db_free_table_catree(DbTable *tbl);
+static SWord db_free_table_continue_catree(DbTable *tbl, SWord);
+static void db_foreach_offheap_catree(DbTable *,
+ void (*)(ErlOffHeap *, void *),
+ void *);
+static SWord db_delete_all_objects_catree(Process* p, DbTable* tbl, SWord reds);
+static int
+db_lookup_dbterm_catree(Process *, DbTable *, Eterm key, Eterm obj,
+ DbUpdateHandle*);
+static void db_finalize_dbterm_catree(int cret, DbUpdateHandle *);
+
+/*
+** External interface
+*/
+DbTableMethod db_catree =
+{
+ db_create_catree,
+ db_first_catree,
+ db_next_catree,
+ db_last_catree,
+ db_prev_catree,
+ db_put_catree,
+ db_get_catree,
+ db_get_element_catree,
+ db_member_catree,
+ db_erase_catree,
+ db_erase_object_catree,
+ db_slot_catree,
+ db_select_chunk_catree,
+ db_select_catree,
+ db_select_delete_catree,
+ db_select_continue_catree,
+ db_select_delete_continue_catree,
+ db_select_count_catree,
+ db_select_count_continue_catree,
+ db_select_replace_catree,
+ db_select_replace_continue_catree,
+ db_take_catree,
+ db_delete_all_objects_catree,
+ db_free_table_catree,
+ db_free_table_continue_catree,
+ db_print_catree,
+ db_foreach_offheap_catree,
+ db_lookup_dbterm_catree,
+ db_finalize_dbterm_catree
+};
+
+/*
+ * Constants
+ */
+
+#define ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION 200
+#define ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION (-1)
+#define ERL_DB_CATREE_LOCK_MORE_THAN_ONE_CONTRIBUTION (-10)
+#define ERL_DB_CATREE_HIGH_CONTENTION_LIMIT 1000
+#define ERL_DB_CATREE_LOW_CONTENTION_LIMIT (-1000)
+#define ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT 14
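+
+/*
+ * Informally (a back-of-the-envelope reading of the constants above,
+ * with lock_statistics starting at 0): every contended write-lock
+ * acquisition of a base node adds 200 and every uncontended one
+ * subtracts 1, so roughly half a dozen contended acquisitions in a
+ * row push a base node over the high contention limit and trigger a
+ * split (unless the route node tree is already
+ * ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT levels deep). On the
+ * other hand, on the order of a thousand uncontended acquisitions,
+ * or a hundred operations that had to lock more than one base node
+ * (-10 each), drive the counter below the low contention limit and
+ * trigger a join. The counter is reset to 0 when a split or join is
+ * attempted but cannot be carried out.
+ */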
+
+/*
+ * Internal CA tree related helper functions and macros
+ */
+
+#define GET_ROUTE_NODE_KEY(node) (node->baseOrRoute.route.key.tpl[0])
+#define GET_BASE_NODE_LOCK(node) (&(node->baseOrRoute.base.lock))
+#define GET_ROUTE_NODE_LOCK(node) (&(node->baseOrRoute.route.lock))
+
+
+/* Helpers for reading and writing shared atomic variables */
+
+/* No memory barrier */
+#define GET_ROOT(tb) ((DbTableCATreeNode*)erts_atomic_read_nob(&(tb->root)))
+#define GET_LEFT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->baseOrRoute.route.left)))
+#define GET_RIGHT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->baseOrRoute.route.right)))
+#define SET_ROOT(tb, v) erts_atomic_set_nob(&((tb)->root), (erts_aint_t)(v))
+#define SET_LEFT(ca_tree_route_node, v) erts_atomic_set_nob(&(ca_tree_route_node->baseOrRoute.route.left), (erts_aint_t)(v));
+#define SET_RIGHT(ca_tree_route_node, v) erts_atomic_set_nob(&(ca_tree_route_node->baseOrRoute.route.right), (erts_aint_t)(v));
+
+
+/* Release or acquire barriers */
+#define GET_ROOT_ACQB(tb) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(tb->root)))
+#define GET_LEFT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->baseOrRoute.route.left)))
+#define GET_RIGHT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->baseOrRoute.route.right)))
+#define SET_ROOT_RELB(tb, v) erts_atomic_set_relb(&((tb)->root), (erts_aint_t)(v))
+#define SET_LEFT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->baseOrRoute.route.left), (erts_aint_t)(v));
+#define SET_RIGHT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->baseOrRoute.route.right), (erts_aint_t)(v));
+
+/* Compares a key to the key in a route node */
+static ERTS_INLINE Sint cmp_key_route(DbTableCommon * tb,
+ Eterm key,
+ DbTableCATreeNode *obj)
+{
+ return CMP(key, GET_ROUTE_NODE_KEY(obj));
+}
+
+static ERTS_INLINE void push_node_dyn_array(DbTable *tb,
+ CATreeNodeStack *stack,
+ DbTableCATreeNode *node)
+{
+ int i;
+ if (stack->pos == stack->size) {
+ DbTableCATreeNode **newArray =
+ erts_db_alloc(ERTS_ALC_T_DB_STK, tb,
+ sizeof(DbTableCATreeNode*) * (stack->size*2));
+ for (i = 0; i < stack->pos; i++) {
+ newArray[i] = stack->array[i];
+ }
+ if (stack->size > STACK_NEED) {
+ /* Dynamically allocated array that needs to be deallocated */
+ erts_db_free(ERTS_ALC_T_DB_STK, tb,
+ stack->array,
+ sizeof(DbTableCATreeNode *) * stack->size);
+ }
+ stack->array = newArray;
+ stack->size = stack->size*2;
+ }
+ PUSH_NODE(stack, node);
+}
+
+/*
+ * Used by the split_tree function
+ */
+static ERTS_INLINE
+int less_than_two_elements(TreeDbTerm *root)
+{
+ return root == NULL || (root->left == NULL && root->right == NULL);
+}
+
+/*
+ * Inserts a TreeDbTerm into a tree. Returns the new root.
+ */
+static ERTS_INLINE
+TreeDbTerm* insert_TreeDbTerm(DbTableCommon *common_table_data,
+ TreeDbTerm *insert_to_root,
+ TreeDbTerm *value_to_insert) {
+ /* Non-recursive insertion into an AVL tree, building our own stack */
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 0;
+ TreeDbTerm * base = insert_to_root;
+ TreeDbTerm **this = &base;
+ Sint c;
+ Eterm key;
+ int dir;
+ TreeDbTerm *p1, *p2, *p;
+
+ key = GETKEY(common_table_data, value_to_insert->dbterm.tpl);
+
+ dstack[dpos++] = DIR_END;
+ for (;;)
+ if (!*this) { /* Found our place */
+ state = 1;
+ *this = value_to_insert;
+ (*this)->balance = 0;
+ (*this)->left = (*this)->right = NULL;
+ break;
+ } else if ((c = cmp_key(common_table_data, key, *this)) < 0) {
+ /* go left */
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ }
+
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ p = *this;
+ if (dir == DIR_LEFT) {
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ break;
+ case -1: /* The icky case */
+ p1 = p->left;
+ if (p1->balance == -1) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double LR rotation */
+ p2 = p1->right;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (p2->balance == -1) ? +1 : 0;
+ p1->balance = (p2->balance == 1) ? -1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ } else { /* dir == DIR_RIGHT */
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ break;
+ case 1:
+ p1 = p->right;
+ if (p1->balance == 1) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (p2->balance == 1) ? -1 : 0;
+ p1->balance = (p2->balance == -1) ? 1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ }
+ }
+ return base;
+}
+
+/*
+ * Split an AVL tree into two trees. The function stores the node
+ * containing the "split key" in the write back parameter
+ * split_key_wb. The function stores the left tree containing the keys
+ * that are smaller than the "split key" in the write back parameter
+ * left_wb and the tree containing the rest of the keys in the write
+ * back parameter right_wb.
+ */
+static void split_tree(DbTableCommon *tb,
+ TreeDbTerm *root,
+ TreeDbTerm **split_key_node_wb,
+ TreeDbTerm **left_wb,
+ TreeDbTerm **right_wb) {
+ TreeDbTerm * split_node = NULL;
+ TreeDbTerm * left_root;
+ TreeDbTerm * right_root;
+ if (root->left == NULL) { /* To get a non-empty split */
+ *right_wb = root->right;
+ *split_key_node_wb = root->right;
+ root->right = NULL;
+ root->balance = 0;
+ *left_wb = root;
+ return;
+ }
+ split_node = root;
+ left_root = split_node->left;
+ split_node->left = NULL;
+ right_root = split_node->right;
+ split_node->right = NULL;
+ right_root = insert_TreeDbTerm(tb,
+ right_root,
+ split_node);
+ *split_key_node_wb = split_node;
+ *left_wb = left_root;
+ *right_wb = right_root;
+}
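+
+/*
+ * A small illustration (with integer keys, for the sake of the
+ * example): splitting the tree
+ *
+ *        2
+ *       / \
+ *      1   3
+ *
+ * takes the root (2) as the split node, leaves {1} as the left tree,
+ * re-inserts 2 into {3} to form the right tree {2,3} and writes the
+ * node holding 2 back through split_key_node_wb. The route node that
+ * split_catree later creates from this key directs keys smaller than
+ * 2 to the left base node and all other keys to the right one.
+ */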
+
+/*
+ * Used by the join_trees function
+ */
+static ERTS_INLINE int compute_tree_height(TreeDbTerm * root)
+{
+ if(root == NULL) {
+ return 0;
+ } else {
+ TreeDbTerm * current_node = root;
+ int height_so_far = 1;
+ while (current_node->left != NULL || current_node->right != NULL) {
+ if (current_node->balance == -1) {
+ current_node = current_node->left;
+ } else {
+ current_node = current_node->right;
+ }
+ height_so_far = height_so_far + 1;
+ }
+ return height_so_far;
+ }
+}
+
+/*
+ * Used by the join_trees function
+ */
+static ERTS_INLINE
+TreeDbTerm* linkout_min_or_max_tree_node(TreeDbTerm **root, int is_min)
+{
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 0;
+ TreeDbTerm **this = root;
+ int dir;
+ TreeDbTerm *q = NULL;
+
+ dstack[dpos++] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Failure */
+ return NULL;
+ } else if (is_min && (*this)->left != NULL) {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (!is_min && (*this)->right != NULL) {
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Min value, found the one to splice out */
+ q = (*this);
+ if (q->right == NULL) {
+ (*this) = q->left;
+ state = 1;
+ } else if (q->left == NULL) {
+ (*this) = q->right;
+ state = 1;
+ }
+ break;
+ }
+ }
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ if (dir == DIR_LEFT) {
+ state = tree_balance_left(this);
+ } else {
+ state = tree_balance_right(this);
+ }
+ }
+ return q;
+}
+
+#define LINKOUT_MIN_TREE_NODE(root) linkout_min_or_max_tree_node(root, 1)
+#define LINKOUT_MAX_TREE_NODE(root) linkout_min_or_max_tree_node(root, 0)
+
+/*
+ * Joins two AVL trees where all the keys in the left one are smaller
+ * than the keys in the right one and returns the resulting tree.
+ *
+ * The algorithm is described on page 474 in D. E. Knuth. The Art of
+ * Computer Programming: Sorting and Searching,
+ * vol. 3. Addison-Wesley, 2nd edition, 1998.
+ */
+static TreeDbTerm* join_trees(TreeDbTerm *left_root_param,
+ TreeDbTerm *right_root_param)
+{
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 1;
+ TreeDbTerm **this;
+ int dir;
+ TreeDbTerm *p1, *p2, *p;
+ TreeDbTerm *left_root = left_root_param;
+ TreeDbTerm *right_root = right_root_param;
+ int left_height;
+ int right_height;
+ int current_height;
+ dstack[dpos++] = DIR_END;
+ if (left_root == NULL) {
+ return right_root;
+ } else if (right_root == NULL) {
+ return left_root;
+ }
+
+ left_height = compute_tree_height(left_root);
+ right_height = compute_tree_height(right_root);
+ if (left_height >= right_height) {
+ TreeDbTerm * new_root =
+ LINKOUT_MIN_TREE_NODE(&right_root);
+ int new_right_height = compute_tree_height(right_root);
+ TreeDbTerm * current_node = left_root;
+ this = &left_root;
+ current_height = left_height;
+ while(current_height > new_right_height + 1) {
+ if (current_node->balance == -1) {
+ current_height = current_height - 2;
+ } else {
+ current_height = current_height - 1;
+ }
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ current_node = current_node->right;
+ }
+ new_root->left = current_node;
+ new_root->right = right_root;
+ new_root->balance = new_right_height - current_height;
+ *this = new_root;
+ } else {
+ /* This case is symmetric to the previous case */
+ TreeDbTerm * new_root =
+ LINKOUT_MAX_TREE_NODE(&left_root);
+ int new_left_height = compute_tree_height(left_root);
+ TreeDbTerm * current_node = right_root;
+ this = &right_root;
+ current_height = right_height;
+ while (current_height > new_left_height + 1) {
+ if (current_node->balance == 1) {
+ current_height = current_height - 2;
+ } else {
+ current_height = current_height - 1;
+ }
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ current_node = current_node->left;
+ }
+ new_root->right = current_node;
+ new_root->left = left_root;
+ new_root->balance = current_height - new_left_height;
+ *this = new_root;
+ }
+ /* Now we need to continue as if this was during the insert */
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ p = *this;
+ if (dir == DIR_LEFT) {
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ break;
+ case -1: /* The icky case */
+ p1 = p->left;
+ if (p1->balance == -1) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double LR rotation */
+ p2 = p1->right;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (p2->balance == -1) ? +1 : 0;
+ p1->balance = (p2->balance == 1) ? -1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ } else { /* dir == DIR_RIGHT */
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ break;
+ case 1:
+ p1 = p->right;
+ if (p1->balance == 1) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (p2->balance == 1) ? -1 : 0;
+ p1->balance = (p2->balance == -1) ? 1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ }
+ }
+ /* Return the joined tree */
+ if (left_height >= right_height) {
+ return left_root;
+ } else {
+ return right_root;
+ }
+}
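+
+/*
+ * Informally: the pivot node is unlinked from the tree that is not
+ * taller (the minimum of the right tree when the left tree is at
+ * least as tall, otherwise the maximum of the left tree). The code
+ * then walks down the outer spine of the taller tree until it finds
+ * a subtree whose height is close enough to the other tree, hangs
+ * the pivot there with that subtree and the other tree as children,
+ * and finally rebalances upwards exactly as an ordinary AVL
+ * insertion would.
+ */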
+
+
+static ERTS_INLINE
+int try_wlock_base_node(DbTableCATreeBaseNode *base_node)
+{
+ return EBUSY == erts_rwmtx_tryrwlock(&base_node->lock);
+}
+
+/*
+ * Locks a base node without adjusting the lock statistics
+ */
+static ERTS_INLINE
+void wlock_base_node_no_stats(DbTableCATreeBaseNode *base_node)
+{
+ erts_rwmtx_rwlock(&base_node->lock);
+}
+
+/*
+ * Locks a base node and adjusts the lock statistics according to if
+ * the lock was contended or not
+ */
+static ERTS_INLINE
+void wlock_base_node(DbTableCATreeBaseNode *base_node)
+{
+ if (try_wlock_base_node(base_node)) {
+ /* The lock is contended */
+ wlock_base_node_no_stats(base_node);
+ base_node->lock_statistics += ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION;
+ } else {
+ base_node->lock_statistics += ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION;
+ }
+}
+
+static ERTS_INLINE
+void wunlock_base_node(DbTableCATreeBaseNode *base_node)
+{
+ erts_rwmtx_rwunlock(&base_node->lock);
+}
+
+static ERTS_INLINE
+void rlock_base_node(DbTableCATreeBaseNode *base_node)
+{
+ erts_rwmtx_rlock(&base_node->lock);
+}
+
+static ERTS_INLINE
+void runlock_base_node(DbTableCATreeBaseNode *base_node)
+{
+ erts_rwmtx_runlock(&base_node->lock);
+}
+
+static ERTS_INLINE
+void lock_route_node(DbTableCATreeRouteNode *route_node)
+{
+ erts_mtx_lock(&route_node->lock);
+}
+
+static ERTS_INLINE
+void unlock_route_node(DbTableCATreeRouteNode *route_node)
+{
+ erts_mtx_unlock(&route_node->lock);
+}
+
+
+/*
+ * The following macros are used to create the ETS functions that only
+ * need to access one element (e.g. db_put_catree, db_get_catree and
+ * db_erase_catree).
+ */
+
+#define ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(LOCK,UNLOCK) \
+ int retry; \
+ DbTableCATreeNode *current_node; \
+ DbTableCATreeNode *prev_node; \
+ DbTableCommon* common_table_data = &tb->common; \
+ DbTableCATreeBaseNode *base_node; \
+ int current_level; \
+ (void)prev_node; \
+ do { \
+ retry = 0; \
+ current_node = GET_ROOT_ACQB(tb); \
+ prev_node = NULL; \
+ current_level = 0; \
+ while ( ! current_node->is_base_node ) { \
+ current_level = current_level + 1; \
+ prev_node = current_node; \
+ if (cmp_key_route(common_table_data,key,current_node) < 0) { \
+ current_node = GET_LEFT_ACQB(current_node); \
+ } else { \
+ current_node = GET_RIGHT_ACQB(current_node); \
+ } \
+ } \
+ base_node = &current_node->baseOrRoute.base; \
+ LOCK(base_node); \
+ if ( ! base_node->is_valid ) { \
+ /* Retry */ \
+ UNLOCK(base_node); \
+ retry = 1; \
+ } \
+ } while(retry);
+
+
+#define ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART \
+ if (base_node->lock_statistics > ERL_DB_CATREE_HIGH_CONTENTION_LIMIT \
+ && current_level < ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT) { \
+ split_catree(&tb->common, prev_node, current_node); \
+ } else if (base_node->lock_statistics < ERL_DB_CATREE_LOW_CONTENTION_LIMIT) { \
+ join_catree(tb, prev_node, current_node); \
+ } else { \
+ wunlock_base_node(base_node); \
+ }
+
+#define ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(FUN_POSTFIX,PARAMETERS,SEQUENTIAL_CALL) \
+ static int erl_db_catree_do_operation_##FUN_POSTFIX(DbTableCATree *tb, \
+ Eterm key, \
+ PARAMETERS){ \
+ int result; \
+ ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(wlock_base_node,wunlock_base_node) \
+ result = SEQUENTIAL_CALL; \
+ ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART \
+ return result; \
+ }
+
+
+#define ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(FUN_POSTFIX,PARAMETERS,SEQUENTIAL_CALL) \
+ static int erl_db_catree_do_operation_##FUN_POSTFIX(DbTableCATree *tb, \
+ Eterm key, \
+ PARAMETERS){ \
+ int result; \
+ ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(rlock_base_node,runlock_base_node) \
+ result = SEQUENTIAL_CALL; \
+ runlock_base_node(base_node); \
+ return result; \
+ }
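+
+/*
+ * As an informal sketch of what the macros above expand to, the
+ * invocation
+ *
+ *   ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(get,
+ *       ERL_DB_CATREE_DO_OPERATION_GET_PARAMS,
+ *       db_get_tree_common(p, &tb->common, base_node->root,
+ *                          key, ret, NULL);)
+ *
+ * used further down produces roughly
+ *
+ *   static int erl_db_catree_do_operation_get(DbTableCATree *tb,
+ *                                             Eterm key,
+ *                                             Process *p, Eterm *ret)
+ *   {
+ *       int result;
+ *       ...find the base node covering key, read-lock it and retry
+ *          if it has been invalidated by a concurrent split/join...
+ *       result = db_get_tree_common(p, &tb->common, base_node->root,
+ *                                   key, ret, NULL);
+ *       runlock_base_node(base_node);
+ *       return result;
+ *   }
+ *
+ * The write variant locks the base node for writing instead and ends
+ * with the adapt-and-unlock step that may split or join the tree.
+ */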
+
+
+
+static DbTableCATreeNode *create_catree_base_node(DbTableCATree *tb)
+{
+ DbTableCATreeNode *new_base_node_container =
+ erts_db_alloc(ERTS_ALC_T_DB_TABLE,
+ (DbTable *) tb,
+ sizeof(DbTableCATreeNode));
+ DbTableCATreeBaseNode *new_base_node =
+ &new_base_node_container->baseOrRoute.base;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ new_base_node_container->is_base_node = 1;
+ new_base_node->root = NULL;
+ if (tb->common.type & DB_FREQ_READ)
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ if (erts_ets_rwmtx_spin_count >= 0)
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
+ erts_rwmtx_init_opt(&new_base_node->lock, &rwmtx_opt,
+ "erl_db_catree_base_node", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ new_base_node->lock_statistics = 0;
+ new_base_node->is_valid = 1;
+ new_base_node->tab = (DbTable *) tb;
+ return new_base_node_container;
+}
+
+static DbTableCATreeNode*
+create_catree_route_node(DbTableCommon * common_table_data,
+ DbTableCATreeNode *left,
+ DbTableCATreeNode *right,
+ DbTerm * keyTerm)
+{
+ Eterm* top;
+ Eterm key = GETKEY(common_table_data,keyTerm->tpl);
+ int key_size = size_object(key);
+ Uint offset = offsetof(DbTableCATreeNode,baseOrRoute) +
+ offsetof(DbTableCATreeRouteNode,key);
+ size_t route_node_container_size =
+ offset +
+ sizeof(DbTerm) +
+ sizeof(Eterm)*key_size;
+ ErlOffHeap tmp_offheap;
+ byte* new_route_node_container_bytes =
+ erts_db_alloc(ERTS_ALC_T_DB_TABLE,
+ (DbTable *) common_table_data,
+ route_node_container_size);
+ DbTerm* newp = (DbTerm*) (new_route_node_container_bytes + offset);
+ DbTableCATreeNode *new_route_node_container =
+ (DbTableCATreeNode*)new_route_node_container_bytes;
+ DbTableCATreeRouteNode *new_route_node =
+ &new_route_node_container->baseOrRoute.route;
+ new_route_node->tab = (DbTable *)common_table_data;
+ if (key_size != 0) {
+ newp->size = key_size;
+ top = &newp->tpl[1];
+ tmp_offheap.first = NULL;
+ newp->tpl[0] = copy_struct(key, key_size, &top, &tmp_offheap);
+ newp->first_oh = tmp_offheap.first;
+ } else {
+ newp->size = key_size;
+ newp->first_oh = NULL;
+ newp->tpl[0] = key;
+ }
+ new_route_node_container->is_base_node = 0;
+ new_route_node->is_valid = 1;
+ erts_atomic_init_nob(&(new_route_node->left), (erts_aint_t)left);
+ erts_atomic_init_nob(&(new_route_node->right), (erts_aint_t)right);
+ erts_mtx_init(&new_route_node->lock, "erl_db_catree_route_node",
+ NIL, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ return new_route_node_container;
+}
+
+static void free_catree_base_node(void* base_node_container_ptr)
+{
+ DbTableCATreeNode *base_node_container =
+ (DbTableCATreeNode *)base_node_container_ptr;
+ DbTableCATreeBaseNode *base_node =
+ &base_node_container->baseOrRoute.base;
+ erts_rwmtx_destroy(&base_node->lock);
+ erts_db_free(ERTS_ALC_T_DB_TABLE,
+ base_node->tab, base_node_container,
+ sizeof(DbTableCATreeNode));
+}
+
+static void free_catree_routing_node(void *route_node_container_ptr)
+{
+ size_t route_node_container_size;
+ byte* route_node_container_bytes = route_node_container_ptr;
+ DbTableCATreeNode *route_node_container =
+ (DbTableCATreeNode *)route_node_container_bytes;
+ DbTableCATreeRouteNode *route_node =
+ &route_node_container->baseOrRoute.route;
+ int key_size = route_node->key.size;
+ Uint offset = offsetof(DbTableCATreeNode,baseOrRoute) +
+ offsetof(DbTableCATreeRouteNode,key);
+ ErlOffHeap tmp_oh;
+ DbTerm* db_term = (DbTerm*) (route_node_container_bytes + offset);
+ erts_mtx_destroy(&route_node->lock);
+ route_node_container_size =
+ offset +
+ sizeof(DbTerm) +
+ sizeof(Eterm)*key_size;
+ if (key_size != 0) {
+ tmp_oh.first = db_term->first_oh;
+ erts_cleanup_offheap(&tmp_oh);
+ }
+ erts_db_free(ERTS_ALC_T_DB_TABLE,
+ route_node->tab,
+ route_node_container,
+ route_node_container_size);
+}
+
+/*
+ * Returns the parent routing node of the specified
+ * route_node_container if such a routing node exists, or NULL if
+ * route_node_container is the root of the tree
+ */
+static ERTS_INLINE DbTableCATreeNode *
+parent_of(DbTableCATree *tb,
+ DbTableCATreeNode *route_node_container)
+{
+ Eterm key = GET_ROUTE_NODE_KEY(route_node_container);
+ DbTableCATreeNode *current_node = GET_ROOT_ACQB(tb);
+ DbTableCATreeNode *prev_node = NULL;
+ if (current_node == route_node_container) {
+ return NULL;
+ }
+ while (current_node != route_node_container) {
+ prev_node = current_node;
+ if (cmp_key_route((DbTableCommon *)tb, key, current_node) < 0) {
+ current_node = GET_LEFT_ACQB(current_node);
+ } else {
+ current_node = GET_RIGHT_ACQB(current_node);
+ }
+ }
+ return prev_node;
+}
+
+
+static ERTS_INLINE DbTableCATreeNode *
+leftmost_base_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode *node = root;
+ while (!node->is_base_node) {
+ node = GET_LEFT_ACQB(node);
+ }
+ return node;
+}
+
+
+static ERTS_INLINE DbTableCATreeNode *
+rightmost_base_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode *node = root;
+ while (!node->is_base_node) {
+ node = GET_RIGHT_ACQB(node);
+ }
+ return node;
+}
+
+
+static ERTS_INLINE DbTableCATreeNode *
+leftmost_route_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode *node = root;
+ DbTableCATreeNode *prev_node = NULL;
+ while (!node->is_base_node) {
+ prev_node = node;
+ node = GET_LEFT_ACQB(node);
+ }
+ if (prev_node == NULL) {
+ return NULL;
+ } else {
+ return prev_node;
+ }
+}
+
+static ERTS_INLINE DbTableCATreeNode*
+rightmost_route_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode * node = root;
+ DbTableCATreeNode * prev_node = NULL;
+ while (!node->is_base_node) {
+ prev_node = node;
+ node = GET_RIGHT_ACQB(node);
+ }
+ if (prev_node == NULL) {
+ return NULL;
+ } else {
+ return prev_node;
+ }
+}
+
+static ERTS_INLINE DbTableCATreeNode*
+leftmost_base_node_and_path(DbTableCATreeNode *root, CATreeNodeStack * stack)
+{
+ DbTableCATreeNode * node = root;
+ while (!node->is_base_node) {
+ PUSH_NODE(stack, node);
+ node = GET_LEFT_ACQB(node);
+ }
+ return node;
+}
+
+static ERTS_INLINE DbTableCATreeNode*
+get_next_base_node_and_path(DbTableCommon *common_table_data,
+ DbTableCATreeNode *base_node,
+ CATreeNodeStack *stack)
+{
+ if (EMPTY_NODE(stack)) { /* The parent of base_node is the root */
+ return NULL;
+ } else {
+ if (GET_LEFT(TOP_NODE(stack)) == base_node) {
+ return leftmost_base_node_and_path(
+ GET_RIGHT_ACQB(TOP_NODE(stack)),
+ stack);
+ } else {
+ Eterm pkey =
+ TOP_NODE(stack)->baseOrRoute.route.key.tpl[0]; /* pKey = key of parent */
+ POP_NODE(stack);
+ while (!EMPTY_NODE(stack)) {
+ if (TOP_NODE(stack)->baseOrRoute.route.is_valid &&
+ cmp_key_route(common_table_data, pkey, TOP_NODE(stack)) <= 0) {
+ return leftmost_base_node_and_path(GET_RIGHT_ACQB(TOP_NODE(stack)), stack);
+ } else {
+ POP_NODE(stack);
+ }
+ }
+ }
+ return NULL;
+ }
+}
+
+static ERTS_INLINE void
+clone_stack(CATreeNodeStack *search_stack_ptr,
+ CATreeNodeStack *search_stack_copy_ptr)
+{
+ int i;
+ search_stack_copy_ptr->pos = search_stack_ptr->pos;
+ for (i = 0; i < search_stack_ptr->pos; i++) {
+ search_stack_copy_ptr->array[i] = search_stack_ptr->array[i];
+ }
+}
+
+
+
+static ERTS_INLINE DbTableCATreeNode*
+lock_first_base_node(DbTable *tbl,
+ CATreeNodeStack *search_stack_ptr,
+ CATreeNodeStack *locked_base_nodes_stack_ptr)
+{
+ int retry;
+ DbTableCATreeNode *current_node;
+ DbTableCATreeBaseNode *base_node;
+ DbTableCATree* tb = &tbl->catree;
+ do {
+ retry = 0;
+ current_node = GET_ROOT_ACQB(tb);
+ while ( ! current_node->is_base_node ) {
+ PUSH_NODE(search_stack_ptr, current_node);
+ current_node = GET_LEFT_ACQB(current_node);
+ }
+ base_node = &current_node->baseOrRoute.base;
+ rlock_base_node(base_node);
+ if ( ! base_node->is_valid ) {
+ /* Retry */
+ runlock_base_node(base_node);
+ retry = 1;
+ }
+ } while(retry);
+ push_node_dyn_array(tbl, locked_base_nodes_stack_ptr, current_node);
+ return current_node;
+}
+
+static ERTS_INLINE DbTableCATreeBaseNode*
+find_and_lock_next_base_node_and_path(DbTable *tbl,
+ CATreeNodeStack **search_stack_ptr_ptr,
+ CATreeNodeStack **search_stack_copy_ptr_ptr,
+ CATreeNodeStack *locked_base_nodes_stack_ptr)
+{
+ DbTableCATreeNode *current_node;
+ DbTableCATreeBaseNode *base_node;
+ CATreeNodeStack * tmp_stack_ptr;
+ DbTableCommon* common_table_data;
+ retry_find_and_lock_next_base_node:
+ current_node = TOP_NODE(locked_base_nodes_stack_ptr);
+ common_table_data = &tbl->common;
+ clone_stack(*search_stack_ptr_ptr, *search_stack_copy_ptr_ptr);
+ current_node =
+ get_next_base_node_and_path(common_table_data, current_node, *search_stack_ptr_ptr);
+ if (current_node == NULL) {
+ return NULL;
+ }
+ base_node = &current_node->baseOrRoute.base;
+ rlock_base_node(base_node);
+ if ( ! base_node->is_valid ) {
+ /* Retry */
+ runlock_base_node(base_node);
+ /* Revert to previous state */
+ current_node = TOP_NODE(locked_base_nodes_stack_ptr);
+ tmp_stack_ptr = *search_stack_ptr_ptr;
+ *search_stack_ptr_ptr = *search_stack_copy_ptr_ptr;
+ *search_stack_copy_ptr_ptr = tmp_stack_ptr;
+ goto retry_find_and_lock_next_base_node;
+ } else {
+ push_node_dyn_array(tbl, locked_base_nodes_stack_ptr, current_node);
+ }
+ return base_node;
+}
+
+static ERTS_INLINE
+void unlock_and_release_locked_base_node_stack(DbTable *tbl,
+ CATreeNodeStack *locked_base_nodes_stack_ptr)
+{
+ DbTableCATreeNode *current_node;
+ DbTableCATreeBaseNode *base_node;
+ int i;
+ for (i = 0; i < locked_base_nodes_stack_ptr->pos; i++) {
+ current_node = locked_base_nodes_stack_ptr->array[i];
+ base_node = &current_node->baseOrRoute.base;
+ if (locked_base_nodes_stack_ptr->pos > 1) {
+ base_node->lock_statistics = /* This is not atomic which is fine as */
+ base_node->lock_statistics + /* correctness does not depend on that. */
+ ERL_DB_CATREE_LOCK_MORE_THAN_ONE_CONTRIBUTION;
+ }
+ runlock_base_node(base_node);
+ }
+ if (locked_base_nodes_stack_ptr->size > STACK_NEED) {
+ erts_db_free(ERTS_ALC_T_DB_STK, tbl,
+ locked_base_nodes_stack_ptr->array,
+ sizeof(DbTableCATreeNode *) * locked_base_nodes_stack_ptr->size);
+ }
+}
+
+static ERTS_INLINE
+void init_stack(CATreeNodeStack *stack,
+ DbTableCATreeNode **stack_array,
+ Uint init_size)
+{
+ stack->array = stack_array;
+ stack->pos = 0;
+ stack->size = init_size;
+}
+
+static ERTS_INLINE
+void init_tree_stack(DbTreeStack *stack,
+ TreeDbTerm **stack_array,
+ Uint init_slot)
+{
+ stack->array = stack_array;
+ stack->pos = 0;
+ stack->slot = init_slot;
+}
+
+#define DEC_ROUTE_NODE_STACK_AND_ARRAY(STACK_NAME) \
+ CATreeNodeStack STACK_NAME; \
+ CATreeNodeStack * STACK_NAME##_ptr = &(STACK_NAME); \
+ DbTableCATreeNode *STACK_NAME##_array[STACK_NEED];
+
+#define DECLARE_AND_INIT_BASE_NODE_SEARCH_STACKS \
+DEC_ROUTE_NODE_STACK_AND_ARRAY(search_stack) \
+DEC_ROUTE_NODE_STACK_AND_ARRAY(search_stack_copy) \
+DEC_ROUTE_NODE_STACK_AND_ARRAY(locked_base_nodes_stack) \
+init_stack(&search_stack, search_stack_array, 0); \
+init_stack(&search_stack_copy, search_stack_copy_array, 0); \
+init_stack(&locked_base_nodes_stack, locked_base_nodes_stack_array, STACK_NEED); /* STACK_NEED is used as the stack array size here */
+
+/*
+ * Locks and returns the base node that contains the specified key if
+ * it is present. The function saves the search path to the found base
+ * node in search_stack_ptr and adds the found base node to
+ * locked_base_nodes_stack_ptr.
+ */
+static ERTS_INLINE DbTableCATreeBaseNode *
+lock_base_node_with_key(DbTable *tbl,
+ Eterm key,
+ CATreeNodeStack * search_stack_ptr,
+ CATreeNodeStack * locked_base_nodes_stack_ptr)
+{
+ int retry;
+ DbTableCATreeNode *current_node;
+ DbTableCATreeBaseNode *base_node;
+ DbTableCATree* tb = &tbl->catree;
+ DbTableCommon* common_table_data = &tbl->common;
+ do {
+ retry = 0;
+ current_node = GET_ROOT_ACQB(tb);
+ while ( ! current_node->is_base_node ) {
+ PUSH_NODE(search_stack_ptr, current_node);
+ if( cmp_key_route(common_table_data,key,current_node) < 0 ) {
+ current_node = GET_LEFT_ACQB(current_node);
+ } else {
+ current_node = GET_RIGHT_ACQB(current_node);
+ }
+ }
+ base_node = &current_node->baseOrRoute.base;
+ rlock_base_node(base_node);
+ if ( ! base_node->is_valid ) {
+ /* Retry */
+ runlock_base_node(base_node);
+ retry = 1;
+ }
+ } while(retry);
+ push_node_dyn_array(tbl, locked_base_nodes_stack_ptr, current_node);
+ return base_node;
+}
+
+/*
+ * Joins a base node with its right neighbor. Returns the base node
+ * resulting from the join in locked state or NULL if there is no base
+ * node to join with.
+ */
+static DbTableCATreeNode*
+erl_db_catree_force_join_right(DbTableCommon *common_table_data,
+ DbTableCATreeNode *parent_container,
+ DbTableCATreeNode *base_container,
+ DbTableCATreeNode **result_parent_wb)
+{
+ DbTableCATreeRouteNode *parent;
+ DbTableCATreeNode *gparent_container;
+ DbTableCATreeRouteNode *gparent;
+ DbTableCATreeBaseNode *base = &base_container->baseOrRoute.base;
+ DbTableCATree *tb = (DbTableCATree *)common_table_data;
+ DbTableCATreeNode *neighbor_base_container;
+ DbTableCATreeBaseNode *neighbor_base;
+ DbTableCATreeNode *new_neighbor_base;
+ DbTableCATreeNode *neighbor_base_parent;
+ int neighbour_not_valid;
+ if (parent_container == NULL) {
+ return NULL;
+ }
+ parent = &parent_container->baseOrRoute.route;
+ do {
+ neighbor_base_container = leftmost_base_node(GET_RIGHT_ACQB(parent_container));
+ neighbor_base = &neighbor_base_container->baseOrRoute.base;
+ wlock_base_node_no_stats(neighbor_base);
+ neighbour_not_valid = !neighbor_base->is_valid;
+ if (neighbour_not_valid) {
+ wunlock_base_node(neighbor_base);
+ }
+ } while (neighbour_not_valid);
+ lock_route_node(parent);
+ parent->is_valid = 0;
+ neighbor_base->is_valid = 0;
+ base->is_valid = 0;
+ gparent = NULL;
+ gparent_container = NULL;
+ do {
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ gparent_container = parent_of(tb, parent_container);
+ if (gparent_container != NULL) {
+ gparent = &gparent_container->baseOrRoute.route;
+ lock_route_node(gparent);
+ } else {
+ gparent = NULL;
+ }
+ } while (gparent != NULL && !gparent->is_valid);
+ if (gparent == NULL) {
+ SET_ROOT_RELB(tb, GET_RIGHT(parent_container));
+ } else if (GET_LEFT(gparent_container) == parent_container) {
+ SET_LEFT_RELB(gparent_container, GET_RIGHT(parent_container));
+ } else {
+ SET_RIGHT_RELB(gparent_container, GET_RIGHT(parent_container));
+ }
+ unlock_route_node(parent);
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ new_neighbor_base = create_catree_base_node(tb);
+ new_neighbor_base->baseOrRoute.base.root =
+ join_trees(base->root, neighbor_base->root);
+ wlock_base_node_no_stats(&(new_neighbor_base->baseOrRoute.base));
+ neighbor_base_parent = NULL;
+ if (GET_RIGHT(parent_container) == neighbor_base_container) {
+ neighbor_base_parent = gparent_container;
+ } else {
+ neighbor_base_parent =
+ leftmost_route_node(GET_RIGHT(parent_container));
+ }
+ if(neighbor_base_parent == NULL) {
+ SET_ROOT_RELB(tb, new_neighbor_base);
+ } else if (GET_LEFT(neighbor_base_parent) == neighbor_base_container) {
+ SET_LEFT_RELB(neighbor_base_parent, new_neighbor_base);
+ } else {
+ SET_RIGHT_RELB(neighbor_base_parent, new_neighbor_base);
+ }
+ wunlock_base_node(base);
+ wunlock_base_node(neighbor_base);
+ /* Free the parent and base */
+ erts_schedule_thr_prgr_later_op(free_catree_routing_node,
+ parent_container,
+ &parent->free_item);
+ erts_schedule_thr_prgr_later_op(free_catree_base_node,
+ base_container,
+ &base->free_item);
+ erts_schedule_thr_prgr_later_op(free_catree_base_node,
+ neighbor_base_container,
+ &neighbor_base->free_item);
+
+ if (parent_container == neighbor_base_container) {
+ *result_parent_wb = gparent_container;
+ } else {
+ *result_parent_wb = neighbor_base_parent;
+ }
+ return new_neighbor_base;
+}
+
+/*
+ * Used to merge together all base nodes for operations such as last
+ * and select_*. Returns the base node resulting from the merge in
+ * locked state.
+ */
+static DbTableCATreeNode *
+merge_to_one_locked_base_node(DbTableCommon * common_table_data)
+{
+ DbTableCATreeNode *parent_container;
+ DbTableCATreeNode *new_parent_container;
+ DbTableCATree *tb = (DbTableCATree *)common_table_data;
+ DbTableCATreeNode *base_container;
+ DbTableCATreeNode *new_base_container;
+ int is_not_valid;
+ /* Find first base node */
+ do {
+ parent_container = NULL;
+ base_container = GET_ROOT_ACQB(tb);
+ while ( ! base_container->is_base_node ) {
+ parent_container = base_container;
+ base_container = GET_LEFT_ACQB(base_container);
+ }
+ wlock_base_node_no_stats(&(base_container->baseOrRoute.base));
+ is_not_valid = ! base_container->baseOrRoute.base.is_valid;
+ if (is_not_valid) {
+ wunlock_base_node(&(base_container->baseOrRoute.base));
+ }
+ } while(is_not_valid);
+ do {
+ new_base_container = erl_db_catree_force_join_right(common_table_data,
+ parent_container,
+ base_container,
+ &new_parent_container);
+ if (new_base_container != NULL) {
+ base_container = new_base_container;
+ parent_container = new_parent_container;
+ }
+ } while(new_base_container != NULL);
+ return base_container;
+}
+
+
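+/*
+ * Joins the given write-locked base node with one of its neighboring
+ * base nodes when the lock statistics indicate low contention. The
+ * parent route node and both base nodes are marked invalid, the
+ * parent is unlinked from its grandparent (or from the table root),
+ * a new base node holding the joined AVL tree is linked in where the
+ * neighbor used to be, and the replaced nodes are freed via thread
+ * progress once no thread can be accessing them. If the neighbor's
+ * lock cannot be taken immediately, or the base node has no parent,
+ * the join is simply abandoned and the lock statistics are reset.
+ */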
+static void join_catree(DbTableCATree *tb,
+ DbTableCATreeNode *parent_container,
+ DbTableCATreeNode *base_container)
+{
+ DbTableCATreeRouteNode *parent;
+ DbTableCATreeNode *gparent_container;
+ DbTableCATreeRouteNode *gparent;
+ DbTableCATreeBaseNode *base = &base_container->baseOrRoute.base;
+ DbTableCATreeNode *neighbor_base_container;
+ DbTableCATreeBaseNode *neighbor_base;
+ DbTableCATreeNode *new_neighbor_base;
+ DbTableCATreeNode *neighbor_base_parent;
+ if (parent_container == NULL) {
+ base->lock_statistics = 0;
+ wunlock_base_node(base);
+ return;
+ }
+ parent = &parent_container->baseOrRoute.route;
+ if (GET_LEFT(parent_container) == base_container) {
+ neighbor_base_container = leftmost_base_node(GET_RIGHT_ACQB(parent_container));
+ neighbor_base = &neighbor_base_container->baseOrRoute.base;
+ if (try_wlock_base_node(neighbor_base)) {
+ /* Failed to acquire lock */
+ base->lock_statistics = 0;
+ wunlock_base_node(base);
+ return;
+ } else if (!neighbor_base->is_valid) {
+ base->lock_statistics = 0;
+ wunlock_base_node(base);
+ wunlock_base_node(neighbor_base);
+ return;
+ } else {
+ lock_route_node(parent);
+ parent->is_valid = 0;
+ neighbor_base->is_valid = 0;
+ base->is_valid = 0;
+ gparent = NULL;
+ gparent_container = NULL;
+ do {
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ gparent_container = parent_of(tb, parent_container);
+ if (gparent_container != NULL) {
+ gparent = &gparent_container->baseOrRoute.route;
+ lock_route_node(gparent);
+ } else {
+ gparent = NULL;
+ }
+ } while (gparent != NULL && !gparent->is_valid);
+ if (gparent == NULL) {
+ SET_ROOT_RELB(tb, GET_RIGHT(parent_container));
+ } else if (GET_LEFT(gparent_container) == parent_container) {
+ SET_LEFT_RELB(gparent_container, GET_RIGHT(parent_container));
+ } else {
+ SET_RIGHT_RELB(gparent_container, GET_RIGHT(parent_container));
+ }
+ unlock_route_node(parent);
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ new_neighbor_base = create_catree_base_node(tb);
+ new_neighbor_base->baseOrRoute.base.root =
+ join_trees(base->root, neighbor_base->root);
+ neighbor_base_parent = NULL;
+ if (GET_RIGHT(parent_container) == neighbor_base_container) {
+ neighbor_base_parent = gparent_container;
+ } else {
+ neighbor_base_parent =
+ leftmost_route_node(GET_RIGHT(parent_container));
+ }
+ }
+ } else { /* Symmetric case */
+ neighbor_base_container = rightmost_base_node(GET_LEFT_ACQB(parent_container));
+ neighbor_base = &neighbor_base_container->baseOrRoute.base;
+ if (try_wlock_base_node(neighbor_base)) {
+ /* Failed to acquire lock */
+ base->lock_statistics = 0;
+ wunlock_base_node(base);
+ return;
+ } else if (!neighbor_base->is_valid) {
+ base->lock_statistics = 0;
+ wunlock_base_node(base);
+ wunlock_base_node(neighbor_base);
+ return;
+ } else {
+ lock_route_node(parent);
+ parent->is_valid = 0;
+ neighbor_base->is_valid = 0;
+ base->is_valid = 0;
+ gparent = NULL;
+ gparent_container = NULL;
+ do {
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ gparent_container = parent_of(tb, parent_container);
+ if (gparent_container != NULL) {
+ gparent = &gparent_container->baseOrRoute.route;
+ lock_route_node(gparent);
+ } else {
+ gparent = NULL;
+ }
+ } while (gparent != NULL && !gparent->is_valid);
+ if (gparent == NULL) {
+ SET_ROOT_RELB(tb, GET_LEFT(parent_container));
+ } else if (GET_RIGHT(gparent_container) == parent_container) {
+ SET_RIGHT_RELB(gparent_container, GET_LEFT(parent_container));
+ } else {
+ SET_LEFT_RELB(gparent_container, GET_LEFT(parent_container));
+ }
+ unlock_route_node(parent);
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ new_neighbor_base = create_catree_base_node(tb);
+ new_neighbor_base->baseOrRoute.base.root =
+ join_trees(neighbor_base->root, base->root);
+ neighbor_base_parent = NULL;
+ if (GET_LEFT(parent_container) == neighbor_base_container) {
+ neighbor_base_parent = gparent_container;
+ } else {
+ neighbor_base_parent =
+ rightmost_route_node(GET_LEFT(parent_container));
+ }
+ }
+ }
+ /* Link in new neighbor and free nodes that are no longer in the tree */
+ if (neighbor_base_parent == NULL) {
+ SET_ROOT_RELB(tb, new_neighbor_base);
+ } else if (GET_LEFT(neighbor_base_parent) == neighbor_base_container) {
+ SET_LEFT_RELB(neighbor_base_parent, new_neighbor_base);
+ } else {
+ SET_RIGHT_RELB(neighbor_base_parent, new_neighbor_base);
+ }
+ wunlock_base_node(base);
+ wunlock_base_node(neighbor_base);
+ /* Free the parent and base */
+ erts_schedule_thr_prgr_later_op(free_catree_routing_node,
+ parent_container,
+ &parent->free_item);
+ erts_schedule_thr_prgr_later_op(free_catree_base_node,
+ base_container,
+ &base->free_item);
+ erts_schedule_thr_prgr_later_op(free_catree_base_node,
+ neighbor_base_container,
+ &neighbor_base->free_item);
+}
+
+
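+/*
+ * Splits the given write-locked base node when the lock statistics
+ * indicate high contention. The AVL tree of the base node is split
+ * around its root key (see split_tree), two new base nodes are
+ * created for the halves, and a new route node holding the split key
+ * is linked in where the old base node was. The old base node is
+ * marked invalid and freed via thread progress. Nothing is split if
+ * the tree holds fewer than two elements.
+ */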
+static void split_catree(DbTableCommon *tb,
+ DbTableCATreeNode *parent_container,
+ DbTableCATreeNode *base_container) {
+ TreeDbTerm *splitOutWriteBack;
+ TreeDbTerm *leftWriteBack;
+ TreeDbTerm *rightWriteBack;
+ DbTableCATreeNode *left_base_node;
+ DbTableCATreeNode *right_base_node;
+ DbTableCATreeNode *routing_node_container;
+ DbTableCATreeBaseNode *base = &base_container->baseOrRoute.base;
+ DbTableCATreeRouteNode *parent;
+ if (parent_container == NULL) {
+ parent = NULL;
+ } else {
+ parent = &parent_container->baseOrRoute.route;
+ }
+
+ if (less_than_two_elements(base->root)) {
+ base->lock_statistics = 0;
+ wunlock_base_node(base);
+ return;
+ } else {
+ /* Split the tree */
+ split_tree(tb,
+ base->root,
+ &splitOutWriteBack,
+ &leftWriteBack,
+ &rightWriteBack);
+ /* Create new base nodes */
+ left_base_node =
+ create_catree_base_node((DbTableCATree*)tb);
+ right_base_node =
+ create_catree_base_node((DbTableCATree*)tb);
+ left_base_node->baseOrRoute.base.root = leftWriteBack;
+ right_base_node->baseOrRoute.base.root = rightWriteBack;
+ routing_node_container = create_catree_route_node(tb,
+ left_base_node,
+ right_base_node,
+ &splitOutWriteBack->dbterm);
+ if (parent == NULL) {
+ SET_ROOT_RELB((DbTableCATree*)tb, routing_node_container);
+ } else if(GET_LEFT(parent_container) == base_container) {
+ SET_LEFT_RELB(parent_container, routing_node_container);
+ } else {
+ SET_RIGHT_RELB(parent_container, routing_node_container);
+ }
+ base->is_valid = 0;
+ wunlock_base_node(base);
+ erts_schedule_thr_prgr_later_op(free_catree_base_node,
+ base_container,
+ &base->free_item);
+ }
+}
+
+/*
+ * Helper functions for removing the table
+ */
+
+static void catree_add_base_node_to_free_list(
+ DbTableCATree *tb,
+ DbTableCATreeNode *base_node_container)
+{
+ base_node_container->baseOrRoute.base.next =
+ tb->base_nodes_to_free_list;
+ tb->base_nodes_to_free_list = base_node_container;
+}
+
+static void catree_deque_base_node_from_free_list(DbTableCATree *tb)
+{
+ if (tb->base_nodes_to_free_list == NULL) {
+ return; /* List empty */
+ } else {
+ DbTableCATreeNode *first = tb->base_nodes_to_free_list;
+ tb->base_nodes_to_free_list = first->baseOrRoute.base.next;
+ }
+}
+
+static DbTableCATreeNode *catree_first_base_node_from_free_list(
+ DbTableCATree *tb)
+{
+ return tb->base_nodes_to_free_list;
+}
+
+static SWord do_free_routing_nodes_catree_cont(DbTableCATree *tb, SWord num_left)
+{
+ DbTableCATreeNode *root;
+ DbTableCATreeNode *p;
+ for (;;) {
+ root = POP_NODE(&tb->free_stack_rnodes);
+ if (root == NULL) break;
+ else if(root->is_base_node) {
+ catree_add_base_node_to_free_list(tb, root);
+ break;
+ }
+ for (;;) {
+ if ((GET_LEFT(root) != NULL) &&
+ (p = GET_LEFT(root))->is_base_node) {
+ SET_LEFT(root, NULL);
+ catree_add_base_node_to_free_list(tb, p);
+ } else if ((GET_RIGHT(root) != NULL) &&
+ (p = GET_RIGHT(root))->is_base_node) {
+ SET_RIGHT(root, NULL);
+ catree_add_base_node_to_free_list(tb, p);
+ } else if ((p = GET_LEFT(root)) != NULL) {
+ SET_LEFT(root, NULL);
+ PUSH_NODE(&tb->free_stack_rnodes, root);
+ root = p;
+ } else if ((p = GET_RIGHT(root)) != NULL) {
+ SET_RIGHT(root, NULL);
+ PUSH_NODE(&tb->free_stack_rnodes, root);
+ root = p;
+ } else {
+ free_catree_routing_node(root);
+ if (--num_left >= 0) {
+ break;
+ } else {
+ return num_left; /* Done enough for now */
+ }
+ }
+ }
+ }
+ return num_left;
+}
+
+static SWord do_free_base_node_cont(DbTableCATree *tb, SWord num_left)
+{
+ TreeDbTerm *root;
+ TreeDbTerm *p;
+ DbTableCATreeNode *base_node_container =
+ catree_first_base_node_from_free_list(tb);
+ for (;;) {
+ root = POP_NODE(&tb->free_stack_elems);
+ if (root == NULL) break;
+ for (;;) {
+ if ((p = root->left) != NULL) {
+ root->left = NULL;
+ PUSH_NODE(&tb->free_stack_elems, root);
+ root = p;
+ } else if ((p = root->right) != NULL) {
+ root->right = NULL;
+ PUSH_NODE(&tb->free_stack_elems, root);
+ root = p;
+ } else {
+ free_term((DbTable*)tb, root);
+ if (--num_left >= 0) {
+ break;
+ } else {
+ return num_left; /* Done enough for now */
+ }
+ }
+ }
+ }
+ catree_deque_base_node_from_free_list(tb);
+ free_catree_base_node(base_node_container);
+ base_node_container = catree_first_base_node_from_free_list(tb);
+ if (base_node_container != NULL) {
+ PUSH_NODE(&tb->free_stack_elems, base_node_container->baseOrRoute.base.root);
+ }
+ return num_left;
+}
+
+
+/*
+** Initialization function
+*/
+
+void db_initialize_catree(void)
+{
+ return;
+}
+
+/*
+** Table interface routines (i.e., what's called by the BIFs)
+*/
+
+int db_create_catree(Process *p, DbTable *tbl)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode *root = create_catree_base_node(tb);
+ tb->deletion = 0;
+ tb->base_nodes_to_free_list = NULL;
+ erts_atomic_init_relb(&(tb->root), (erts_aint_t)root);
+ return DB_ERROR_NONE;
+}
+
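+/*
+ * The first/next operations below read-lock one base node at a time
+ * along the traversal path. Every base node that gets locked is
+ * pushed on locked_base_nodes_stack and all of them are released
+ * together at the end; when more than one base node had to be locked
+ * the negative ERL_DB_CATREE_LOCK_MORE_THAN_ONE_CONTRIBUTION is
+ * applied to their lock statistics, which over time encourages such
+ * base nodes to be joined.
+ */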
+static int db_first_catree(Process *p, DbTable *tbl, Eterm *ret)
+{
+ DbTableCATreeBaseNode *base_node;
+ int result;
+ DECLARE_AND_INIT_BASE_NODE_SEARCH_STACKS;
+ /* Find first base node */
+ base_node = &(lock_first_base_node(tbl, &search_stack, &locked_base_nodes_stack)->baseOrRoute.base);
+ /* Find next base node until non-empty base node is found */
+ while (base_node != NULL && base_node->root == NULL) {
+ base_node = find_and_lock_next_base_node_and_path(tbl, &search_stack_ptr, &search_stack_copy_ptr, locked_base_nodes_stack_ptr);
+ }
+ /* Get the return value */
+ result = db_first_tree_common(p, tbl, (base_node == NULL ? NULL : base_node->root), ret, NULL);
+ /* Unlock base nodes */
+ unlock_and_release_locked_base_node_stack(tbl, locked_base_nodes_stack_ptr);
+ return result;
+}
+
+static int db_next_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTreeStack next_search_stack;
+ TreeDbTerm *next_search_stack_array[STACK_NEED];
+ DbTableCATreeBaseNode *base_node;
+ int result = 0;
+ DECLARE_AND_INIT_BASE_NODE_SEARCH_STACKS;
+ init_tree_stack(&next_search_stack, next_search_stack_array, 0);
+ /* Lock base node with key */
+ base_node = lock_base_node_with_key(tbl, key, &search_stack, &locked_base_nodes_stack);
+ /* Continue until key is not >= greatest key in base_node */
+ while (base_node != NULL) {
+ result = db_next_tree_common(p, tbl, base_node->root, key,
+ ret, &next_search_stack);
+ if (result != DB_ERROR_NONE || *ret != am_EOT) {
+ break;
+ }
+ base_node =
+ find_and_lock_next_base_node_and_path(tbl,
+ &search_stack_ptr,
+ &search_stack_copy_ptr,
+ locked_base_nodes_stack_ptr);
+ }
+ unlock_and_release_locked_base_node_stack(tbl, &locked_base_nodes_stack);
+ return result;
+}
+
+static int db_last_catree(Process *p, DbTable *tbl, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode *base = merge_to_one_locked_base_node(&tb->common);
+ int result = db_last_tree_common(p, tbl, base->baseOrRoute.base.root, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_prev_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTreeStack stack;
+ TreeDbTerm * stack_array[STACK_NEED];
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ init_tree_stack(&stack, stack_array, 0);
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_prev_tree_common(p, tbl, base->baseOrRoute.base.root, key, ret, &stack);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+#define ERL_DB_CATREE_DO_OPERATION_PUT_PARAMS Eterm obj, int key_clash_fail
+ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(put,
+ ERL_DB_CATREE_DO_OPERATION_PUT_PARAMS,
+ db_put_tree_common(&tb->common,
+ &base_node->root,
+ obj,
+ key_clash_fail,
+ NULL);)
+
+static int db_put_catree(DbTable *tbl, Eterm obj, int key_clash_fail)
+{
+ DbTableCATree *tb = &tbl->catree;
+ Eterm key = GETKEY(&tb->common, tuple_val(obj));
+ return erl_db_catree_do_operation_put(tb,
+ key,
+ obj,
+ key_clash_fail);
+}
+
+#define ERL_DB_CATREE_DO_OPERATION_GET_PARAMS Process *p, Eterm *ret
+ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(get,
+ ERL_DB_CATREE_DO_OPERATION_GET_PARAMS,
+ db_get_tree_common(p, &tb->common,
+ base_node->root,
+ key, ret, NULL);)
+
+static int db_get_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ return erl_db_catree_do_operation_get(tb, key, p, ret);
+}
+
+#define ERL_DB_CATREE_DO_OPERATION_MEMBER_PARAMS Eterm *ret
+ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(member,
+ ERL_DB_CATREE_DO_OPERATION_MEMBER_PARAMS,
+ db_member_tree_common(&tb->common,
+ base_node->root,
+ key,
+ ret,
+ NULL);)
+
+static int db_member_catree(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ return erl_db_catree_do_operation_member(tb, key, ret);
+}
+
+#define ERL_DB_CATREE_DO_OPERATION_GET_ELEMENT_PARAMS Process *p, int ndex, Eterm *ret
+ERL_DB_CATREE_CREATE_DO_READ_OPERATION_FUNCTION(get_element,
+ ERL_DB_CATREE_DO_OPERATION_GET_ELEMENT_PARAMS,
+ db_get_element_tree_common(p, &tb->common,
+ base_node->root,
+ key, ndex,
+ ret, NULL))
+
+static int db_get_element_catree(Process *p, DbTable *tbl,
+ Eterm key, int ndex, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ return erl_db_catree_do_operation_get_element(tb, key, p, ndex, ret);
+}
+
+#define ERL_DB_CATREE_DO_OPERATION_ERASE_PARAMS DbTable *tbl, Eterm *ret
+ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(erase,
+ ERL_DB_CATREE_DO_OPERATION_ERASE_PARAMS,
+ db_erase_tree_common(tbl,
+ &base_node->root,
+ key,
+ ret,
+ NULL);)
+
+static int db_erase_catree(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ return erl_db_catree_do_operation_erase(tb, key, tbl, ret);
+}
+
+#define ERL_DB_CATREE_DO_OPERATION_ERASE_OBJECT_PARAMS DbTable *tbl, Eterm object, Eterm *ret
+ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(erase_object,
+ ERL_DB_CATREE_DO_OPERATION_ERASE_OBJECT_PARAMS,
+ db_erase_object_tree_common(tbl,
+ &base_node->root,
+ object,
+ ret,
+ NULL);)
+
+static int db_erase_object_catree(DbTable *tbl, Eterm object, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ Eterm key = GETKEY(&tb->common, tuple_val(object));
+ return erl_db_catree_do_operation_erase_object(tb, key, tbl, object, ret);
+}
+
+
+static int db_slot_catree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_slot_tree_common(p, tbl, base->baseOrRoute.base.root,
+ slot_term, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_continue_catree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_continue_tree_common(p, &tb->common, &base->baseOrRoute.base.root,
+ continuation, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+
+static int db_select_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, int reverse, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_tree_common(p, &tb->common, &base->baseOrRoute.base.root,
+ tid, pattern, reverse, ret,
+ NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_count_continue_catree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_count_continue_tree_common(p, &tb->common,
+ &base->baseOrRoute.base.root,
+ continuation, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_count_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_count_tree_common(p, &tb->common, &base->baseOrRoute.base.root,
+ tid, pattern, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_chunk_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
+ int reversed, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_chunk_tree_common(p, &tb->common, &base->baseOrRoute.base.root,
+ tid, pattern, chunk_size, reversed, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_delete_continue_catree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTreeStack stack;
+ TreeDbTerm * stack_array[STACK_NEED];
+ int result;
+ DbTableCATreeNode *base;
+ init_tree_stack(&stack, stack_array, 0);
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_delete_continue_tree_common(p, tbl, &base->baseOrRoute.base.root,
+ continuation, ret, &stack);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_delete_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTreeStack stack;
+ TreeDbTerm * stack_array[STACK_NEED];
+ int result;
+ DbTableCATreeNode *base;
+ init_tree_stack(&stack, stack_array, 0);
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_delete_tree_common(p, tbl, &base->baseOrRoute.base.root,
+ tid, pattern, ret, &stack);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_replace_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_replace_tree_common(p, &tb->common,
+ &base->baseOrRoute.base.root,
+ tid, pattern, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+static int db_select_replace_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int result;
+ DbTableCATreeNode *base;
+ base = merge_to_one_locked_base_node(&tb->common);
+ result = db_select_replace_continue_tree_common(p, &tb->common,
+ &base->baseOrRoute.base.root,
+ continuation, ret, NULL);
+ wunlock_base_node(&(base->baseOrRoute.base));
+ return result;
+}
+
+#define ERL_DB_CATREE_DO_OPERATION_TAKE_PARAMS Process *p, Eterm *ret, DbTable *tbl
+ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION(take,
+ ERL_DB_CATREE_DO_OPERATION_TAKE_PARAMS,
+ db_take_tree_common(p, tbl,
+ &base_node->root,
+ key, ret, NULL);)
+
+static int db_take_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ return erl_db_catree_do_operation_take(tb, key, p, ret, tbl);
+}
+
+/*
+** Other interface routines (not directly coupled to one bif)
+*/
+
+
+/* Display tree contents (for dump) */
+static void db_print_catree(fmtfn_t to, void *to_arg,
+ int show, DbTable *tbl)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode *base = merge_to_one_locked_base_node(&tb->common);
+ db_print_tree_common(to, to_arg, show, base->baseOrRoute.base.root, tbl);
+ wunlock_base_node(&(base->baseOrRoute.base));
+}
+
+/* Release all memory occupied by a single table */
+static int db_free_table_catree(DbTable *tbl)
+{
+ while (db_free_table_continue_catree(tbl, ERTS_SWORD_MAX) < 0)
+ ;
+ return 1;
+}
+
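+/*
+ * Frees the table in bounded steps so that the operation can yield.
+ * The first pass walks the route node tree, freeing route nodes and
+ * collecting the base nodes on a free list; the second pass pops
+ * base nodes off that list and frees the elements of their AVL
+ * trees. A negative return value means that the reduction budget was
+ * exhausted and that the caller should continue later.
+ */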
+static SWord db_free_table_continue_catree(DbTable *tbl, SWord reds)
+{
+ DbTableCATreeNode *first_base_node;
+ DbTableCATree *tb = &tbl->catree;
+ if (!tb->deletion) {
+ tb->deletion = 1;
+ tb->free_stack_elems.array =
+ erts_db_alloc(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ sizeof(TreeDbTerm *) * STACK_NEED);
+ tb->free_stack_elems.pos = 0;
+ tb->free_stack_elems.slot = 0;
+ tb->free_stack_rnodes.array =
+ erts_db_alloc(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ sizeof(DbTableCATreeNode *) * STACK_NEED);
+ tb->free_stack_rnodes.pos = 0;
+ tb->free_stack_rnodes.size = STACK_NEED;
+ PUSH_NODE(&tb->free_stack_rnodes, GET_ROOT(tb));
+ tb->is_routing_nodes_freed = 0;
+ tb->base_nodes_to_free_list = NULL;
+ }
+ if ( ! tb->is_routing_nodes_freed ) {
+ reds = do_free_routing_nodes_catree_cont(tb, reds);
+ if (reds < 0) {
+ return reds; /* Not finished */
+ } else {
+ tb->is_routing_nodes_freed = 1; /* Ready with the routing nodes */
+ first_base_node = catree_first_base_node_from_free_list(tb);
+ PUSH_NODE(&tb->free_stack_elems, first_base_node->baseOrRoute.base.root);
+ }
+ }
+ while (catree_first_base_node_from_free_list(tb) != NULL) {
+ reds = do_free_base_node_cont(tb, reds);
+ if (reds < 0) {
+ return reds; /* Continue later */
+ }
+ }
+ /* Time to free the main structure*/
+ erts_db_free(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ (void *) tb->free_stack_elems.array,
+ sizeof(TreeDbTerm *) * STACK_NEED);
+ erts_db_free(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ (void *) tb->free_stack_rnodes.array,
+ sizeof(DbTableCATreeNode *) * STACK_NEED);
+ return 1;
+}
+
+static SWord db_delete_all_objects_catree(Process* p, DbTable* tbl, SWord reds)
+{
+ reds = db_free_table_continue_catree(tbl, reds);
+ if (reds < 0)
+ return reds;
+ db_create_catree(p, tbl);
+ erts_atomic_set_nob(&tbl->catree.common.nitems, 0);
+ return reds;
+}
+
+
+static void db_foreach_offheap_catree(DbTable *tbl,
+ void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode *base = merge_to_one_locked_base_node(&tb->common);
+ db_foreach_offheap_tree_common(base->baseOrRoute.base.root, func, arg);
+ wunlock_base_node(&(base->baseOrRoute.base));
+}
+
+static int db_lookup_dbterm_catree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle *handle)
+{
+ DbTableCATree *tb = &tbl->catree;
+ int res;
+ ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_FIND_BASE_NODE_PART(wlock_base_node,wunlock_base_node);
+ res = db_lookup_dbterm_tree_common(p, tbl, &base_node->root, key, obj, handle, NULL);
+ if (res == 0) {
+ ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART;
+ } else {
+ /* db_finalize_dbterm_catree will unlock */
+ handle->lck = prev_node;
+ handle->lck2 = current_node;
+ handle->current_level = current_level;
+ }
+ return res;
+}
+
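+/*
+ * Finalizes an update started by db_lookup_dbterm_catree above. On a
+ * successful lookup the base node is left write-locked and the locking
+ * state is stashed in the update handle (lck, lck2, current_level);
+ * this function finalizes the term, then adapts and unlocks that base
+ * node.
+ */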
+static void db_finalize_dbterm_catree(int cret, DbUpdateHandle *handle)
+{
+ DbTableCATree *tb = &(handle->tb->catree);
+ DbTableCATreeNode *prev_node = handle->lck;
+ DbTableCATreeNode *current_node = handle->lck2;
+ int current_level = handle->current_level;
+ DbTableCATreeBaseNode *base_node = &current_node->baseOrRoute.base;
+ db_finalize_dbterm_tree_common(cret, handle, NULL);
+ ERL_DB_CATREE_CREATE_DO_OPERATION_FUNCTION_ADAPT_AND_UNLOCK_PART;
+ return;
+}
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+static void erts_lcnt_enable_db_catree_lock_count_helper(DbTableCATree *tb,
+ DbTableCATreeNode *node,
+ int enable)
+{
+ erts_lcnt_ref_t *lcnt_ref;
+ erts_lock_flags_t lock_type;
+ if (node->is_base_node) {
+ lcnt_ref = &GET_BASE_NODE_LOCK(node)->lcnt;
+ lock_type = ERTS_LOCK_TYPE_RWMUTEX;
+ } else {
+ erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_LEFT(node), enable);
+ erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_RIGHT(node), enable);
+ lcnt_ref = &GET_ROUTE_NODE_LOCK(node)->lcnt;
+ lock_type = ERTS_LOCK_TYPE_MUTEX;
+ }
+ if (enable) {
+ erts_lcnt_install_new_lock_info(lcnt_ref, "db_hash_slot", tb->common.the_name,
+ lock_type | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(lcnt_ref);
+ }
+}
+
+void erts_lcnt_enable_db_catree_lock_count(DbTableCATree *tb, int enable)
+{
+ erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_ROOT(tb), enable);
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
+
+#ifdef HARDDEBUG
+
+/*
+ * Not called, but kept as it might come in handy later
+ */
+static inline int my_check_table_tree(TreeDbTerm *t)
+{
+ int lh, rh;
+ if (t == NULL)
+ return 0;
+ lh = my_check_table_tree(t->left);
+ rh = my_check_table_tree(t->right);
+ if ((rh - lh) != t->balance) {
+ erts_fprintf(stderr, "Invalid tree balance for this node:\n");
+ erts_fprintf(stderr,"balance = %d, left = 0x%08X, right = 0x%08X\n",
+ t->balance, t->left, t->right);
+ erts_fprintf(stderr,"\nDump:\n---------------------------------\n");
+ erts_fprintf(stderr,"\n---------------------------------\n");
+ abort();
+ }
+ return ((rh > lh) ? rh : lh) + 1;
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_db_catree.h b/erts/emulator/beam/erl_db_catree.h
new file mode 100644
index 0000000000..1f2c7da091
--- /dev/null
+++ b/erts/emulator/beam/erl_db_catree.h
@@ -0,0 +1,91 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Implementation of ETS ordered_set table type with
+ * fine-grained synchronization.
+ *
+ * Author: Kjell Winblad
+ *
+ * "erl_db_catree.c" contains more details about the implementation.
+ *
+ */
+
+#ifndef _DB_CATREE_H
+#define _DB_CATREE_H
+
+struct DbTableCATreeNode;
+
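+/*
+ * A CA tree consists of two kinds of nodes: route nodes (internal nodes
+ * holding a key and two children) that only direct the search, and base
+ * nodes (leaves) that each contain an ordinary AVL tree of TreeDbTerms
+ * protected by the base node's own rwlock. The lock_statistics field of
+ * a base node records contention on that lock and is used to decide
+ * when the base node should be split or joined with a neighbour.
+ */
+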
+typedef struct {
+ erts_rwmtx_t lock; /* The lock for this base node */
+ Sint lock_statistics;
+ int is_valid; /* If this base node is still valid */
+ TreeDbTerm *root; /* The root of the sequential tree */
+ DbTable * tab; /* Table ptr, used when freeing using thread progress */
+ ErtsThrPrgrLaterOp free_item; /* Used when freeing using thread progress */
+ struct DbTableCATreeNode * next; /* Used when gradually deleting */
+} DbTableCATreeBaseNode;
+
+typedef struct {
+ ErtsThrPrgrLaterOp free_item; /* Used when freeing using thread progress */
+ DbTable* tab; /* Table ptr, used when freeing using thread progress */
+ erts_mtx_t lock; /* Used when joining route nodes */
+ int is_valid; /* If this route node is still valid */
+ erts_atomic_t left;
+ erts_atomic_t right;
+ DbTerm key;
+} DbTableCATreeRouteNode;
+
+typedef struct DbTableCATreeNode {
+ int is_base_node;
+ union {
+ DbTableCATreeRouteNode route;
+ DbTableCATreeBaseNode base;
+ } baseOrRoute;
+} DbTableCATreeNode;
+
+typedef struct {
+ Uint pos; /* Current position on stack */
+ Uint size; /* The size of the stack array */
+ DbTableCATreeNode** array; /* The stack */
+} CATreeNodeStack;
+
+typedef struct db_table_catree {
+ DbTableCommon common;
+
+ /* CA Tree-specific fields */
+ erts_atomic_t root; /* The tree root (DbTableCATreeNode*) */
+ Uint deletion; /* Being deleted */
+    DbTreeStack free_stack_elems; /* Used for deletion ... */
+ CATreeNodeStack free_stack_rnodes;
+ DbTableCATreeNode *base_nodes_to_free_list;
+ int is_routing_nodes_freed;
+} DbTableCATree;
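+/* The fields deletion, free_stack_elems, free_stack_rnodes,
+ * base_nodes_to_free_list and is_routing_nodes_freed carry the state of
+ * an ongoing incremental table deletion; see
+ * db_free_table_continue_catree() in erl_db_catree.c.
+ */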
+
+void db_initialize_catree(void);
+
+int db_create_catree(Process *p, DbTable *tbl);
+
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_catree_lock_count(DbTableCATree *tb, int enable);
+#endif
+
+#endif /* _DB_CATREE_H */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 2eb874b005..f643344477 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -48,34 +48,13 @@
#include "erl_binary.h"
#include "erl_db_tree.h"
+#include "erl_db_tree_util.h"
#define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos))
#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
-/*
-** A stack of this size is enough for an AVL tree with more than
-** 0xFFFFFFFF elements. May be subject to change if
-** the datatype of the element counter is changed to a 64 bit integer.
-** The Maximal height of an AVL tree is calculated as:
-** h(n) <= 1.4404 * log(n + 2) - 0.328
-** Where n denotes the number of nodes, h(n) the height of the tree
-** with n nodes and log is the binary logarithm.
-*/
-
-#define STACK_NEED 50
#define TREE_MAX_ELEMENTS 0xFFFFFFFFUL
-#define PUSH_NODE(Dtt, Tdt) \
- ((Dtt)->array[(Dtt)->pos++] = Tdt)
-
-#define POP_NODE(Dtt) \
- (((Dtt)->pos) ? \
- (Dtt)->array[--((Dtt)->pos)] : NULL)
-
-#define TOP_NODE(Dtt) \
- ((Dtt->pos) ? \
- (Dtt)->array[(Dtt)->pos - 1] : NULL)
-
#define TOPN_NODE(Dtt, Pos) \
(((Pos) < Dtt->pos) ? \
(Dtt)->array[(Dtt)->pos - ((Pos) + 1)] : NULL)
@@ -89,9 +68,9 @@
/* Obtain table static stack if available. NULL if not.
** Must be released with release_stack()
*/
-static DbTreeStack* get_static_stack(DbTableTree* tb)
+ERTS_INLINE static DbTreeStack* get_static_stack(DbTableTree* tb)
{
- if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
+ if (tb != NULL && !erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
return &tb->static_stack;
}
return NULL;
@@ -100,13 +79,14 @@ static DbTreeStack* get_static_stack(DbTableTree* tb)
/* Obtain static stack if available, otherwise empty dynamic stack.
** Must be released with release_stack()
*/
-static DbTreeStack* get_any_stack(DbTableTree* tb)
+static DbTreeStack* get_any_stack(DbTable* tb, DbTableTree* stack_container)
{
DbTreeStack* stack;
- if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
- return &tb->static_stack;
+ if (stack_container != NULL &&
+ !erts_atomic_xchg_acqb(&stack_container->is_stack_busy, 1)) {
+ return &stack_container->static_stack;
}
- stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb,
+ stack = erts_db_alloc(ERTS_ALC_T_DB_STK, tb,
sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED);
stack->pos = 0;
stack->slot = 0;
@@ -114,62 +94,59 @@ static DbTreeStack* get_any_stack(DbTableTree* tb)
return stack;
}
-static void release_stack(DbTableTree* tb, DbTreeStack* stack)
+static void release_stack(DbTable* tb, DbTableTree* stack_container, DbTreeStack* stack)
{
- if (stack == &tb->static_stack) {
- ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1);
- erts_atomic_set_relb(&tb->is_stack_busy, 0);
+ if (stack_container != NULL && stack == &stack_container->static_stack) {
+ ASSERT(erts_atomic_read_nob(&stack_container->is_stack_busy) == 1);
+ erts_atomic_set_relb(&stack_container->is_stack_busy, 0);
}
else {
- erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
+ erts_db_free(ERTS_ALC_T_DB_STK, tb,
(void *) stack, sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED);
}
}
-static ERTS_INLINE void reset_static_stack(DbTableTree* tb)
+static ERTS_INLINE void reset_stack(DbTreeStack* stack)
{
- tb->static_stack.pos = 0;
- tb->static_stack.slot = 0;
+ if (stack != NULL) {
+ stack->pos = 0;
+ stack->slot = 0;
+ }
}
-static ERTS_INLINE void free_term(DbTableTree *tb, TreeDbTerm* p)
+static ERTS_INLINE void reset_static_stack(DbTableTree* tb)
{
- db_free_term((DbTable*)tb, p, offsetof(TreeDbTerm, dbterm));
+ if (tb != NULL) {
+ reset_stack(&tb->static_stack);
+ }
}
-static ERTS_INLINE TreeDbTerm* new_dbterm(DbTableTree *tb, Eterm obj)
+static ERTS_INLINE TreeDbTerm* new_dbterm(DbTableCommon *tb, Eterm obj)
{
TreeDbTerm* p;
- if (tb->common.compress) {
- p = db_store_term_comp(&tb->common, NULL, offsetof(TreeDbTerm,dbterm), obj);
+ if (tb->compress) {
+ p = db_store_term_comp(tb, NULL, offsetof(TreeDbTerm,dbterm), obj);
}
else {
- p = db_store_term(&tb->common, NULL, offsetof(TreeDbTerm,dbterm), obj);
+ p = db_store_term(tb, NULL, offsetof(TreeDbTerm,dbterm), obj);
}
return p;
}
-static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
+static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableCommon *tb, TreeDbTerm* old,
Eterm obj)
{
TreeDbTerm* p;
ASSERT(old != NULL);
- if (tb->common.compress) {
- p = db_store_term_comp(&tb->common, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
+ if (tb->compress) {
+ p = db_store_term_comp(tb, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
}
else {
- p = db_store_term(&tb->common, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
+ p = db_store_term(tb, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
}
return p;
}
/*
-** Some macros for "direction stacks"
-*/
-#define DIR_LEFT 0
-#define DIR_RIGHT 1
-#define DIR_END 2
-
-/*
* Number of records to delete before trapping.
*/
#define DELETE_RECORD_LIMIT 12000
@@ -262,7 +239,9 @@ struct select_count_context {
*/
struct select_delete_context {
Process *p;
- DbTableTree *tb;
+ DbTableCommon *tb;
+ TreeDbTerm **root;
+ DbTreeStack *stack;
Uint accum;
Binary *mp;
Eterm end_condition;
@@ -277,7 +256,8 @@ struct select_delete_context {
*/
struct select_replace_context {
Process *p;
- DbTableTree *tb;
+ DbTableCommon *tb;
+ TreeDbTerm **root;
Binary *mp;
Eterm end_condition;
Eterm *lastobj;
@@ -292,73 +272,82 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete
/*
** Forward declarations
*/
-static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key);
-static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
- Eterm object);
+static TreeDbTerm *linkout_tree(DbTableCommon *tb, TreeDbTerm **root,
+ Eterm key, DbTreeStack *stack);
+static TreeDbTerm *linkout_object_tree(DbTableCommon *tb, TreeDbTerm **root,
+ Eterm object, DbTableTree *stack);
static SWord do_free_tree_continue(DbTableTree *tb, SWord reds);
-static void free_term(DbTableTree *tb, TreeDbTerm* p);
-static int balance_left(TreeDbTerm **this);
-static int balance_right(TreeDbTerm **this);
+static void free_term(DbTable *tb, TreeDbTerm* p);
+int tree_balance_left(TreeDbTerm **this);
+int tree_balance_right(TreeDbTerm **this);
static int delsub(TreeDbTerm **this);
-static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot);
-static TreeDbTerm *find_node(DbTableTree *tb, Eterm key);
-static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key);
-static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack*, TreeDbTerm *this);
-static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key);
-static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key);
-static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*,
- Eterm key);
-static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack*,
- Eterm key);
-static void traverse_backwards(DbTableTree *tb,
+static TreeDbTerm *slot_search(Process *p, TreeDbTerm *root, Sint slot,
+ DbTable *tb, DbTableTree *stack_container);
+static TreeDbTerm *find_node(DbTableCommon *tb, TreeDbTerm *root,
+ Eterm key, DbTableTree *stack_container);
+static TreeDbTerm **find_node2(DbTableCommon *tb, TreeDbTerm **root, Eterm key);
+static TreeDbTerm **find_ptr(DbTableCommon *tb, TreeDbTerm **root,
+ DbTreeStack *stack, TreeDbTerm *this);
+static TreeDbTerm *find_next(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key);
+static TreeDbTerm *find_prev(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key);
+static TreeDbTerm *find_next_from_pb_key(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key);
+static TreeDbTerm *find_prev_from_pb_key(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key);
+static void traverse_backwards(DbTableCommon *tb,
+ TreeDbTerm **root,
DbTreeStack*,
Eterm lastkey,
- int (*doit)(DbTableTree *tb,
+ int (*doit)(DbTableCommon *,
TreeDbTerm *,
void *,
int),
void *context);
-static void traverse_forward(DbTableTree *tb,
+static void traverse_forward(DbTableCommon *tb,
+ TreeDbTerm **root,
DbTreeStack*,
Eterm lastkey,
- int (*doit)(DbTableTree *tb,
+ int (*doit)(DbTableCommon *tb,
TreeDbTerm *,
void *,
int),
void *context);
-static void traverse_update_backwards(DbTableTree *tb,
+static void traverse_update_backwards(DbTableCommon *tb,
+ TreeDbTerm **root,
DbTreeStack*,
Eterm lastkey,
- int (*doit)(DbTableTree *tb,
+ int (*doit)(DbTableCommon *tb,
TreeDbTerm **, // out
void *,
int),
void *context);
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
- Eterm *partly_bound_key);
+static int key_given(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern,
+ TreeDbTerm ***ret, Eterm *partly_bound);
static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key);
static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done);
-static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+static int analyze_pattern(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern,
extra_match_validator_t extra_validator, /* Optional callback */
struct mp_info *mpi);
-static int doit_select(DbTableTree *tb,
+static int doit_select(DbTableCommon *tb,
TreeDbTerm *this,
void *ptr,
int forward);
-static int doit_select_count(DbTableTree *tb,
+static int doit_select_count(DbTableCommon *tb,
TreeDbTerm *this,
void *ptr,
int forward);
-static int doit_select_chunk(DbTableTree *tb,
+static int doit_select_chunk(DbTableCommon *tb,
TreeDbTerm *this,
void *ptr,
int forward);
-static int doit_select_delete(DbTableTree *tb,
+static int doit_select_delete(DbTableCommon *tb,
TreeDbTerm *this,
void *ptr,
int forward);
-static int doit_select_replace(DbTableTree *tb,
+static int doit_select_replace(DbTableCommon *tb,
TreeDbTerm **this_ptr,
void *ptr,
int forward);
@@ -508,18 +497,18 @@ int db_create_tree(Process *p, DbTable *tbl)
return DB_ERROR_NONE;
}
-static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
+int db_first_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container)
{
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
TreeDbTerm *this;
- if (( this = tb->root ) == NULL) {
+ if (( this = root ) == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
}
/* Walk down the tree to the left */
- if ((stack = get_static_stack(tb)) != NULL) {
+ if ((stack = get_static_stack(stack_container)) != NULL) {
stack->pos = stack->slot = 0;
}
while (this->left != NULL) {
@@ -529,23 +518,27 @@ static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
if (stack) {
PUSH_NODE(stack, this);
stack->slot = 1;
- release_stack(tb,stack);
+ release_stack(tbl,stack_container,stack);
}
*ret = db_copy_key(p, tbl, &this->dbterm);
return DB_ERROR_NONE;
}
-static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
- DbTreeStack* stack;
+ return db_first_tree_common(p, tbl, tb->root, ret, tb);
+}
+
+int db_next_tree_common(Process *p, DbTable *tbl,
+ TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack)
+{
TreeDbTerm *this;
if (is_atom(key) && key == am_EOT)
return DB_ERROR_BADKEY;
- stack = get_any_stack(tb);
- this = find_next(tb, stack, key);
- release_stack(tb,stack);
+ this = find_next(&tbl->common, root, stack, key);
if (this == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
@@ -554,18 +547,27 @@ static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
-static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
+static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack = get_any_stack(tbl, tb);
+ int ret_val = db_next_tree_common(p, tbl, tb->root, key, ret, stack);
+ release_stack(tbl,tb,stack);
+ return ret_val;
+}
+
+int db_last_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container)
+{
TreeDbTerm *this;
DbTreeStack* stack;
- if (( this = tb->root ) == NULL) {
+ if (( this = root ) == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
}
/* Walk down the tree to the right */
- if ((stack = get_static_stack(tb)) != NULL) {
+ if ((stack = get_static_stack(stack_container)) != NULL) {
stack->pos = stack->slot = 0;
}
while (this->right != NULL) {
@@ -574,24 +576,27 @@ static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
}
if (stack) {
PUSH_NODE(stack, this);
- stack->slot = NITEMS(tb);
- release_stack(tb,stack);
+ stack->slot = NITEMS(tbl);
+ release_stack(tbl,stack_container,stack);
}
*ret = db_copy_key(p, tbl, &this->dbterm);
return DB_ERROR_NONE;
}
-static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_last_tree_common(p, tbl, tb->root, ret, tb);
+}
+
+int db_prev_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack)
+{
TreeDbTerm *this;
- DbTreeStack* stack;
if (is_atom(key) && key == am_EOT)
return DB_ERROR_BADKEY;
- stack = get_any_stack(tb);
- this = find_prev(tb, stack, key);
- release_stack(tb,stack);
+ this = find_prev(&tbl->common, root, stack, key);
if (this == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
@@ -600,25 +605,30 @@ static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
-static ERTS_INLINE Sint cmp_key(DbTableTree* tb, Eterm key, TreeDbTerm* obj) {
- return CMP(key, GETKEY(tb,obj->dbterm.tpl));
+static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack = get_any_stack(tbl, tb);
+ int res = db_prev_tree_common(p, tbl, tb->root, key, ret, stack);
+ release_stack(tbl,tb,stack);
+ return res;
}
-static ERTS_INLINE int cmp_key_eq(DbTableTree* tb, Eterm key, TreeDbTerm* obj) {
+static ERTS_INLINE int cmp_key_eq(DbTableCommon* tb, Eterm key, TreeDbTerm* obj) {
Eterm obj_key = GETKEY(tb,obj->dbterm.tpl);
return is_same(key, obj_key) || CMP(key, obj_key) == 0;
}
-static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
+int db_put_tree_common(DbTableCommon *tb, TreeDbTerm **root, Eterm obj,
+ int key_clash_fail, DbTableTree *stack_container)
{
- DbTableTree *tb = &tbl->tree;
/* Non recursive insertion in AVL tree, building our own stack */
TreeDbTerm **tstack[STACK_NEED];
int tpos = 0;
int dstack[STACK_NEED+1];
int dpos = 0;
int state = 0;
- TreeDbTerm **this = &tb->root;
+ TreeDbTerm **this = root;
Sint c;
Eterm key;
int dir;
@@ -626,14 +636,14 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
key = GETKEY(tb, tuple_val(obj));
- reset_static_stack(tb);
+ reset_static_stack(stack_container);
dstack[dpos++] = DIR_END;
for (;;)
if (!*this) { /* Found our place */
state = 1;
- if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
- erts_atomic_dec_nob(&tb->common.nitems);
+ if (erts_atomic_inc_read_nob(&tb->nitems) >= TREE_MAX_ELEMENTS) {
+ erts_atomic_dec_nob(&tb->nitems);
return DB_ERROR_SYSRES;
}
*this = new_dbterm(tb, obj);
@@ -724,9 +734,15 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
return DB_ERROR_NONE;
}
-static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
{
DbTableTree *tb = &tbl->tree;
+ return db_put_tree_common(&tb->common, &tb->root, obj, key_clash_fail, tb);
+}
+
+int db_get_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTableTree *stack_container)
+{
Eterm copy;
Eterm *hp, *hend;
TreeDbTerm *this;
@@ -737,13 +753,13 @@ static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
* The list created around it is purely for interface conformance.
*/
- this = find_node(tb,key);
+ this = find_node(tb,root,key,stack_container);
if (this == NULL) {
*ret = NIL;
} else {
hp = HAlloc(p, this->dbterm.size + 2);
hend = hp + this->dbterm.size + 2;
- copy = db_copy_object_from_ets(&tb->common, &this->dbterm, &hp, &MSO(p));
+ copy = db_copy_object_from_ets(tb, &this->dbterm, &hp, &MSO(p));
*ret = CONS(hp, copy, NIL);
hp += 2;
HRelease(p,hend,hp);
@@ -751,18 +767,28 @@ static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
-static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret)
+static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_get_tree_common(p, &tb->common, tb->root, key, ret, tb);
+}
- *ret = (find_node(tb,key) == NULL) ? am_false : am_true;
+int db_member_tree_common(DbTableCommon *tb, TreeDbTerm *root, Eterm key, Eterm *ret,
+ DbTableTree *stack_container)
+{
+ *ret = (find_node(tb,root,key,stack_container) == NULL) ? am_false : am_true;
return DB_ERROR_NONE;
}
-static int db_get_element_tree(Process *p, DbTable *tbl,
- Eterm key, int ndex, Eterm *ret)
+static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_member_tree_common(&tb->common, tb->root, key, ret, tb);
+}
+
+int db_get_element_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ int ndex, Eterm *ret, DbTableTree *stack_container)
+{
/*
* Look the node up:
*/
@@ -776,49 +802,68 @@ static int db_get_element_tree(Process *p, DbTable *tbl,
* around the element here either.
*/
- this = find_node(tb,key);
+ this = find_node(tb,root,key,stack_container);
if (this == NULL) {
return DB_ERROR_BADKEY;
} else {
if (ndex > arityval(this->dbterm.tpl[0])) {
return DB_ERROR_BADPARAM;
}
- *ret = db_copy_element_from_ets(&tb->common, p, &this->dbterm, ndex, &hp, 0);
+ *ret = db_copy_element_from_ets(tb, p, &this->dbterm, ndex, &hp, 0);
}
return DB_ERROR_NONE;
}
-static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret)
+static int db_get_element_tree(Process *p, DbTable *tbl,
+ Eterm key, int ndex, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_get_element_tree_common(p, &tb->common, tb->root, key,
+ ndex, ret, tb);
+}
+
+int db_erase_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */)
+{
TreeDbTerm *res;
*ret = am_true;
- if ((res = linkout_tree(tb, key)) != NULL) {
- free_term(tb, res);
+ if ((res = linkout_tree(&tbl->common, root,key, stack)) != NULL) {
+ free_term(tbl, res);
}
return DB_ERROR_NONE;
}
-static int db_erase_object_tree(DbTable *tbl, Eterm object, Eterm *ret)
+static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_erase_tree_common(tbl, &tb->root, key, ret, &tb->static_stack);
+}
+
+int db_erase_object_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm object,
+ Eterm *ret, DbTableTree *stack_container)
+{
TreeDbTerm *res;
*ret = am_true;
- if ((res = linkout_object_tree(tb, object)) != NULL) {
- free_term(tb, res);
+ if ((res = linkout_object_tree(&tbl->common, root, object, stack_container)) != NULL) {
+ free_term(tbl, res);
}
return DB_ERROR_NONE;
}
-
-static int db_slot_tree(Process *p, DbTable *tbl,
- Eterm slot_term, Eterm *ret)
+static int db_erase_object_tree(DbTable *tbl, Eterm object, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_erase_object_tree_common(tbl, &tb->root, object, ret, tb);
+}
+
+int db_slot_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm slot_term, Eterm *ret,
+ DbTableTree *stack_container)
+{
Sint slot;
TreeDbTerm *st;
Eterm *hp, *hend;
@@ -834,10 +879,10 @@ static int db_slot_tree(Process *p, DbTable *tbl,
if (is_not_small(slot_term) ||
((slot = signed_val(slot_term)) < 0) ||
- (slot > NITEMS(tb)))
+ (slot > NITEMS(tbl)))
return DB_ERROR_BADPARAM;
- if (slot == NITEMS(tb)) {
+ if (slot == NITEMS(tbl)) {
*ret = am_EOT;
return DB_ERROR_NONE;
}
@@ -847,20 +892,27 @@ static int db_slot_tree(Process *p, DbTable *tbl,
* are counted from 1 and up.
*/
++slot;
- st = slot_search(p, tb, slot);
+ st = slot_search(p, root, slot, tbl, stack_container);
if (st == NULL) {
*ret = am_false;
return DB_ERROR_UNSPEC;
}
hp = HAlloc(p, st->dbterm.size + 2);
hend = hp + st->dbterm.size + 2;
- copy = db_copy_object_from_ets(&tb->common, &st->dbterm, &hp, &MSO(p));
+ copy = db_copy_object_from_ets(&tbl->common, &st->dbterm, &hp, &MSO(p));
*ret = CONS(hp, copy, NIL);
hp += 2;
HRelease(p,hend,hp);
return DB_ERROR_NONE;
}
+static int db_slot_tree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ return db_slot_tree_common(p, tbl, tb->root, slot_term, ret, tb);
+}
+
static BIF_RETTYPE ets_select_reverse(BIF_ALIST_3)
@@ -926,19 +978,14 @@ static BIF_RETTYPE bif_trap3(Export *bif,
{
BIF_TRAP3(bif, p, p1, p2, p3);
}
-
-/*
-** This is called either when the select bif traps or when ets:select/1
-** is called. It does mostly the same as db_select_tree and may in either case
-** trap to itself again (via the ets:select/1 bif).
-** Note that this is common for db_select_tree and db_select_chunk_tree.
-*/
-static int db_select_continue_tree(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+
+int db_select_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container)
{
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_context sc;
unsigned sz;
@@ -980,26 +1027,26 @@ static int db_select_continue_tree(Process *p,
sc.end_condition = NIL;
sc.lastobj = NULL;
sc.max = 1000;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
sc.chunk_size = chunk_size;
reverse = unsigned_val(tptr[7]);
sc.got = signed_val(tptr[8]);
- stack = get_any_stack(tb);
+ stack = get_any_stack((DbTable*)tb, stack_container);
if (chunk_size) {
if (reverse) {
- traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
+ traverse_backwards(tb, root, stack, lastkey, &doit_select_chunk, &sc);
} else {
- traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
+ traverse_forward(tb, root, stack, lastkey, &doit_select_chunk, &sc);
}
} else {
if (reverse) {
- traverse_forward(tb, stack, lastkey, &doit_select, &sc);
+ traverse_forward(tb, root, stack, lastkey, &doit_select, &sc);
} else {
- traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
+ traverse_backwards(tb, root, stack, lastkey, &doit_select, &sc);
}
}
- release_stack(tb,stack);
+ release_stack((DbTable*)tb,stack_container,stack);
BUMP_REDS(p, 1000 - sc.max);
@@ -1082,13 +1129,28 @@ static int db_select_continue_tree(Process *p,
#undef RET_TO_BIF
}
+
+/*
+** This is called either when the select bif traps or when ets:select/1
+** is called. It does mostly the same as db_select_tree and may in either case
+** trap to itself again (via the ets:select/1 bif).
+** Note that this is common for db_select_tree and db_select_chunk_tree.
+*/
+static int db_select_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ return db_select_continue_tree_common(p, &tb->common, &tb->root,
+ continuation, ret, tb);
+}
-
-static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, int reverse, Eterm *ret)
+int db_select_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, int reverse, Eterm *ret,
+ DbTableTree *stack_container)
{
/* Strategy: Traverse backwards to build resulting list from tail to head */
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
@@ -1117,11 +1179,11 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
sc.p = p;
sc.max = 1000;
sc.end_condition = NIL;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
sc.got = 0;
sc.chunk_size = 0;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1138,25 +1200,25 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
RET_TO_BIF(sc.accum,DB_ERROR_NONE);
}
- stack = get_any_stack(tb);
+ stack = get_any_stack((DbTable*)tb,stack_container);
if (reverse) {
if (mpi.some_limitation) {
- if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
+ if ((this = find_prev_from_pb_key(tb, *root, stack, mpi.least)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
}
sc.end_condition = mpi.most;
}
- traverse_forward(tb, stack, lastkey, &doit_select, &sc);
+ traverse_forward(tb, root, stack, lastkey, &doit_select, &sc);
} else {
if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
+ traverse_backwards(tb, root, stack, lastkey, &doit_select, &sc);
}
- release_stack(tb,stack);
+ release_stack((DbTable*)tb,stack_container,stack);
#ifdef HARDDEBUG
erts_fprintf(stderr,"Least: %T\n", mpi.least);
erts_fprintf(stderr,"Most: %T\n", mpi.most);
@@ -1192,16 +1254,21 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
}
-
-/*
-** This is called either when the select_count bif traps.
-*/
-static int db_select_count_continue_tree(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, int reverse, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_tree_common(p, &tb->common, &tb->root, tid,
+ pattern, reverse, ret, tb);
+}
+
+int db_select_count_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container)
+{
DbTreeStack* stack;
struct select_count_context sc;
unsigned sz;
@@ -1238,16 +1305,16 @@ static int db_select_count_continue_tree(Process *p,
sc.end_condition = NIL;
sc.lastobj = NULL;
sc.max = 1000;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
if (is_big(tptr[5])) {
sc.got = big_to_uint32(tptr[5]);
} else {
sc.got = unsigned_val(tptr[5]);
}
- stack = get_any_stack(tb);
- traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
- release_stack(tb,stack);
+ stack = get_any_stack((DbTable*)tb, stack_container);
+ traverse_backwards(tb, root, stack, lastkey, &doit_select_count, &sc);
+ release_stack((DbTable*)tb,stack_container,stack);
BUMP_REDS(p, 1000 - sc.max);
@@ -1285,11 +1352,24 @@ static int db_select_count_continue_tree(Process *p,
#undef RET_TO_BIF
}
-
-static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+/*
+** This is called when the select_count bif traps.
+*/
+static int db_select_count_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_count_continue_tree_common(p, &tb->common, &tb->root,
+ continuation, ret, tb);
+}
+
+
+int db_select_count_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container)
+{
DbTreeStack* stack;
struct select_count_context sc;
struct mp_info mpi;
@@ -1318,10 +1398,10 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
sc.p = p;
sc.max = 1000;
sc.end_condition = NIL;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
sc.got = 0;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1338,16 +1418,16 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
}
- stack = get_any_stack(tb);
+ stack = get_any_stack((DbTable*)tb, stack_container);
if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
- release_stack(tb,stack);
+ traverse_backwards(tb, root, stack, lastkey, &doit_select_count, &sc);
+ release_stack((DbTable*)tb,stack_container,stack);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
@@ -1383,12 +1463,20 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
}
-static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Sint chunk_size,
- int reverse,
- Eterm *ret)
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_count_tree_common(p, &tb->common, &tb->root,
+ tid, pattern, ret, tb);
+}
+
+
+int db_select_chunk_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret,
+ DbTableTree *stack_container)
+{
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
@@ -1417,11 +1505,11 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
sc.p = p;
sc.max = 1000;
sc.end_condition = NIL;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
sc.got = 0;
sc.chunk_size = chunk_size;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1443,25 +1531,25 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
}
}
- stack = get_any_stack(tb);
+ stack = get_any_stack((DbTable*)tb,stack_container);
if (reverse) {
if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
+ traverse_backwards(tb, root, stack, lastkey, &doit_select_chunk, &sc);
} else {
if (mpi.some_limitation) {
- if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
+ if ((this = find_prev_from_pb_key(tb, *root, stack, mpi.least)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
}
sc.end_condition = mpi.most;
}
- traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
+ traverse_forward(tb, root, stack, lastkey, &doit_select_chunk, &sc);
}
- release_stack(tb,stack);
+ release_stack((DbTable*)tb,stack_container,stack);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0 || sc.got == chunk_size) {
@@ -1530,15 +1618,25 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
}
-/*
-** This is called when select_delete traps
-*/
-static int db_select_delete_continue_tree(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
+ int reverse,
+ Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_chunk_tree_common(p, &tb->common, &tb->root,
+ tid, pattern, chunk_size,
+ reverse, ret, tb);
+}
+
+
+int db_select_delete_continue_tree_common(Process *p,
+ DbTable *tbl,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTreeStack* stack)
+{
struct select_delete_context sc;
unsigned sz;
Eterm *hp;
@@ -1552,7 +1650,7 @@ static int db_select_delete_continue_tree(Process *p,
#define RET_TO_BIF(Term, State) do { \
if (sc.erase_lastterm) { \
- free_term(tb, sc.lastterm); \
+ free_term(tbl, sc.lastterm); \
} \
*ret = (Term); \
return State; \
@@ -1571,7 +1669,9 @@ static int db_select_delete_continue_tree(Process *p,
mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
sc.p = p;
- sc.tb = tb;
+ sc.tb = &tbl->common;
+ sc.root = root;
+ sc.stack = stack;
if (is_big(tptr[5])) {
sc.accum = big_to_uint32(tptr[5]);
} else {
@@ -1580,17 +1680,16 @@ static int db_select_delete_continue_tree(Process *p,
sc.mp = mp;
sc.end_condition = NIL;
sc.max = 1000;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tbl->common.keypos;
- ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy));
- traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
+ traverse_backwards(&tbl->common, root, stack, lastkey, &doit_select_delete, &sc);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.accum, p), DB_ERROR_NONE);
}
- key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
+ key = GETKEY(&tbl->common, (sc.lastterm)->dbterm.tpl);
if (end_condition != NIL &&
cmp_partly_bound(end_condition,key) > 0) { /* done anyway */
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
@@ -1620,10 +1719,24 @@ static int db_select_delete_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+static int db_select_delete_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy));
+ return db_select_delete_continue_tree_common(p, tbl, &tb->root,
+ continuation, ret,
+ &tb->static_stack);
+}
+
+int db_select_delete_tree_common(Process *p, DbTable *tbl,
+ TreeDbTerm **root,
+ Eterm tid, Eterm pattern,
+ Eterm *ret,
+ DbTreeStack* stack)
+{
struct select_delete_context sc;
struct mp_info mpi;
Eterm lastkey = THE_NON_VALUE;
@@ -1641,7 +1754,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
erts_bin_free(mpi.mp); \
} \
if (sc.erase_lastterm) { \
- free_term(tb, sc.lastterm); \
+ free_term(tbl, sc.lastterm); \
} \
*ret = (Term); \
return RetVal; \
@@ -1655,10 +1768,12 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
sc.p = p;
sc.max = 1000;
sc.end_condition = NIL;
- sc.keypos = tb->common.keypos;
- sc.tb = tb;
+ sc.keypos = tbl->common.keypos;
+ sc.tb = &tbl->common;
+ sc.root = root;
+ sc.stack = stack;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(&tbl->common, root, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(0,errcode);
}
@@ -1671,26 +1786,26 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select_delete(tb,*(mpi.save_term),&sc, 0 /* direction doesn't
+ doit_select_delete(&tbl->common,*(mpi.save_term),&sc, 0 /* direction doesn't
matter */);
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
}
if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, &tb->static_stack, mpi.most)) != NULL) {
- lastkey = GETKEY(tb, this->dbterm.tpl);
+ if ((this = find_next_from_pb_key(&tbl->common, *root, stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(&tbl->common, this->dbterm.tpl);
}
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
+ traverse_backwards(&tbl->common, root, stack, lastkey, &doit_select_delete, &sc);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.accum,p), DB_ERROR_NONE);
}
- key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
+ key = GETKEY(&tbl->common, (sc.lastterm)->dbterm.tpl);
sz = size_object(key);
if (IS_USMALL(0, sc.accum)) {
hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
@@ -1714,7 +1829,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
/* Don't free mpi.mp, so don't use macro */
if (sc.erase_lastterm) {
- free_term(tb, sc.lastterm);
+ free_term(tbl, sc.lastterm);
}
*ret = bif_trap1(&ets_select_delete_continue_exp, p, continuation);
return DB_ERROR_NONE;
@@ -1723,12 +1838,21 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
}
-static int db_select_replace_continue_tree(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_delete_tree_common(p, tbl, &tb->root, tid, pattern, ret,
+ &tb->static_stack);
+}
+
+int db_select_replace_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container)
+{
DbTreeStack* stack;
struct select_replace_context sc;
unsigned sz;
@@ -1764,7 +1888,7 @@ static int db_select_replace_continue_tree(Process *p,
sc.end_condition = NIL;
sc.lastobj = NULL;
sc.max = 1000;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
if (is_big(tptr[5])) {
sc.replaced = big_to_uint32(tptr[5]);
} else {
@@ -1772,9 +1896,9 @@ static int db_select_replace_continue_tree(Process *p,
}
prev_replaced = sc.replaced;
- stack = get_any_stack(tb);
- traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
- release_stack(tb,stack);
+ stack = get_any_stack((DbTable*)tb,stack_container);
+ traverse_update_backwards(tb, root, stack, lastkey, &doit_select_replace, &sc);
+ release_stack((DbTable*)tb,stack_container,stack);
// the more objects we've replaced, the more reductions we've consumed
BUMP_REDS(p, MIN(2000, (1000 - sc.max) + (sc.replaced - prev_replaced)));
@@ -1813,10 +1937,20 @@ static int db_select_replace_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+static int db_select_replace_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_replace_continue_tree_common(p, &tb->common, &tb->root,
+ continuation, ret, tb);
+}
+
+int db_select_replace_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container)
+{
DbTreeStack* stack;
struct select_replace_context sc;
struct mp_info mpi;
@@ -1844,12 +1978,13 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
sc.lastobj = NULL;
sc.p = p;
sc.tb = tb;
+ sc.root = root;
sc.max = 1000;
sc.end_condition = NIL;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
sc.replaced = 0;
- if ((errcode = analyze_pattern(tb, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, root, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1860,7 +1995,7 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
sc.mp = mpi.mp;
- stack = get_static_stack(tb);
+ stack = get_static_stack(stack_container);
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
TreeDbTerm* term = *(mpi.save_term);
@@ -1869,23 +2004,23 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
if (TOP_NODE(stack) == term)
// throw away potentially invalid reference
REPLACE_TOP_NODE(stack, *(mpi.save_term));
- release_stack(tb, stack);
+ release_stack((DbTable*)tb,stack_container, stack);
}
RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
}
if (stack == NULL)
- stack = get_any_stack(tb);
+ stack = get_any_stack((DbTable*)tb,stack_container);
if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ if ((this = find_next_from_pb_key(tb, *root, stack, mpi.most)) != NULL) {
lastkey = GETKEY(tb, this->dbterm.tpl);
}
sc.end_condition = mpi.least;
}
- traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
- release_stack(tb,stack);
+ traverse_update_backwards(tb, root, stack, lastkey, &doit_select_replace, &sc);
+ release_stack((DbTable*)tb,stack_container,stack);
// the more objects we've replaced, the more reductions we've consumed
BUMP_REDS(p, MIN(2000, (1000 - sc.max) + sc.replaced));
if (sc.max > 0) {
@@ -1922,52 +2057,73 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
}
-static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_replace_tree_common(p, &tb->common, &tb->root,
+ tid, pattern, ret, tb);
+}
+
+int db_take_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */)
+{
TreeDbTerm *this;
*ret = NIL;
- this = linkout_tree(tb, key);
+ this = linkout_tree(&tbl->common, root, key, stack);
if (this) {
Eterm copy, *hp, *hend;
hp = HAlloc(p, this->dbterm.size + 2);
hend = hp + this->dbterm.size + 2;
- copy = db_copy_object_from_ets(&tb->common,
+ copy = db_copy_object_from_ets(&tbl->common,
&this->dbterm, &hp, &MSO(p));
*ret = CONS(hp, copy, NIL);
hp += 2;
HRelease(p, hend, hp);
- free_term(tb, this);
+ free_term(tbl, this);
}
return DB_ERROR_NONE;
}
+static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ return db_take_tree_common(p, tbl, &tb->root,
+ key, ret, &tb->static_stack);
+}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
-
-/* Display tree contents (for dump) */
-static void db_print_tree(fmtfn_t to, void *to_arg,
- int show,
- DbTable *tbl)
+void db_print_tree_common(fmtfn_t to, void *to_arg,
+ int show, TreeDbTerm *root, DbTable *tbl)
{
- DbTableTree *tb = &tbl->tree;
#ifdef TREE_DEBUG
if (show)
erts_print(to, to_arg, "\nTree data dump:\n"
"------------------------------------------------\n");
- do_dump_tree2(&tbl->tree, to, to_arg, show, tb->root, 0);
+ do_dump_tree2(&tbl->common, to, to_arg, show, root, 0);
if (show)
erts_print(to, to_arg, "\n"
"------------------------------------------------\n");
#else
- erts_print(to, to_arg, "Ordered set (AVL tree), Elements: %d\n", NITEMS(tb));
+ erts_print(to, to_arg, "Ordered set (AVL tree), Elements: %d\n", NITEMS(tbl));
#endif
}
+/* Display tree contents (for dump) */
+static void db_print_tree(fmtfn_t to, void *to_arg,
+ int show,
+ DbTable *tbl)
+{
+ DbTableTree *tb = &tbl->tree;
+ db_print_tree_common(to, to_arg, show, tb->root, tbl);
+}
+
/* release all memory occupied by a single table */
static int db_free_empty_table_tree(DbTable *tbl)
{
@@ -1992,8 +2148,10 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
(DbTable *) tb,
(void *) tb->static_stack.array,
sizeof(TreeDbTerm *) * STACK_NEED);
- ASSERT(erts_atomic_read_nob(&tb->common.memory_size)
- == sizeof(DbTable));
+ ASSERT((erts_atomic_read_nob(&tb->common.memory_size)
+ == sizeof(DbTable)) ||
+ (erts_atomic_read_nob(&tb->common.memory_size)
+ == (sizeof(DbTable) + sizeof(DbFixation))));
}
return reds;
}
@@ -2012,11 +2170,18 @@ static void do_db_tree_foreach_offheap(TreeDbTerm *,
void (*)(ErlOffHeap *, void *),
void *);
+void db_foreach_offheap_tree_common(TreeDbTerm *root,
+ void (*func)(ErlOffHeap *, void *),
+ void * arg)
+{
+ do_db_tree_foreach_offheap(root, func, arg);
+}
+
static void db_foreach_offheap_tree(DbTable *tbl,
void (*func)(ErlOffHeap *, void *),
void * arg)
{
- do_db_tree_foreach_offheap(tbl->tree.root, func, arg);
+ db_foreach_offheap_tree_common(tbl->tree.root, func, arg);
}
@@ -2041,13 +2206,14 @@ do_db_tree_foreach_offheap(TreeDbTerm *tdbt,
do_db_tree_foreach_offheap(tdbt->right, func, arg);
}
-static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
+static TreeDbTerm *linkout_tree(DbTableCommon *tb, TreeDbTerm **root,
+ Eterm key, DbTreeStack *stack) {
TreeDbTerm **tstack[STACK_NEED];
int tpos = 0;
int dstack[STACK_NEED+1];
int dpos = 0;
int state = 0;
- TreeDbTerm **this = &tb->root;
+ TreeDbTerm **this = root;
Sint c;
int dir;
TreeDbTerm *q = NULL;
@@ -2058,7 +2224,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
* keep the balance. As in insert, we do the stacking ourselves.
*/
- reset_static_stack(tb);
+ reset_stack(stack);
dstack[dpos++] = DIR_END;
for (;;) {
if (!*this) { /* Failure */
@@ -2084,30 +2250,30 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
tstack[tpos++] = this;
state = delsub(this);
}
- erts_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->nitems);
break;
}
}
while (state && ( dir = dstack[--dpos] ) != DIR_END) {
this = tstack[--tpos];
if (dir == DIR_LEFT) {
- state = balance_left(this);
+ state = tree_balance_left(this);
} else {
- state = balance_right(this);
+ state = tree_balance_right(this);
}
}
return q;
}
-static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
- Eterm object)
+static TreeDbTerm *linkout_object_tree(DbTableCommon *tb, TreeDbTerm **root,
+ Eterm object, DbTableTree *stack)
{
TreeDbTerm **tstack[STACK_NEED];
int tpos = 0;
int dstack[STACK_NEED+1];
int dpos = 0;
int state = 0;
- TreeDbTerm **this = &tb->root;
+ TreeDbTerm **this = root;
Sint c;
int dir;
TreeDbTerm *q = NULL;
@@ -2122,7 +2288,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
key = GETKEY(tb, tuple_val(object));
- reset_static_stack(tb);
+ reset_static_stack(stack);
dstack[dpos++] = DIR_END;
for (;;) {
if (!*this) { /* Failure */
@@ -2136,7 +2302,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
tstack[tpos++] = this;
this = &((*this)->right);
} else { /* Equal key, found the only possible matching object*/
- if (!db_eq(&tb->common,object,&(*this)->dbterm)) {
+ if (!db_eq(tb,object,&(*this)->dbterm)) {
return NULL;
}
q = (*this);
@@ -2151,16 +2317,16 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
tstack[tpos++] = this;
state = delsub(this);
}
- erts_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->nitems);
break;
}
}
while (state && ( dir = dstack[--dpos] ) != DIR_END) {
this = tstack[--tpos];
if (dir == DIR_LEFT) {
- state = balance_left(this);
+ state = tree_balance_left(this);
} else {
- state = balance_right(this);
+ state = tree_balance_right(this);
}
}
return q;
@@ -2170,7 +2336,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
** For the select functions, analyzes the pattern and determines which
** part of the tree should be searched. Also compiles the match program
*/
-static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+static int analyze_pattern(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern,
extra_match_validator_t extra_validator, /* Optional callback */
struct mp_info *mpi)
{
@@ -2231,7 +2397,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
- if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if(extra_validator != NULL && !extra_validator(tb->keypos, match, guard, body)) {
if (buff != sbuff) {
erts_free(ERTS_ALC_T_DB_TMP, buff);
}
@@ -2244,7 +2410,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
++i;
partly_bound = NIL;
- res = key_given(tb, tpl, &(mpi->save_term), &partly_bound);
+ res = key_given(tb, root, tpl, &(mpi->save_term), &partly_bound);
if ( res >= 0 ) { /* Can match something */
key = 0;
mpi->something_can_match = 1;
@@ -2308,7 +2474,7 @@ static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
PUSH_NODE(&tb->static_stack, root);
root = p;
} else {
- free_term(tb, root);
+ free_term((DbTable*)tb, root);
if (--reds < 0) {
return reds; /* Done enough for now */
}
@@ -2322,7 +2488,7 @@ static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
/*
* Deletion helpers
*/
-static int balance_left(TreeDbTerm **this)
+int tree_balance_left(TreeDbTerm **this)
{
TreeDbTerm *p, *p1, *p2;
int b1, b2, h = 1;
@@ -2367,7 +2533,7 @@ static int balance_left(TreeDbTerm **this)
return h;
}
-static int balance_right(TreeDbTerm **this)
+int tree_balance_right(TreeDbTerm **this)
{
TreeDbTerm *p, *p1, *p2;
int b1, b2, h = 1;
@@ -2439,7 +2605,7 @@ static int delsub(TreeDbTerm **this)
h = 1;
while (tpos && h) {
r = tstack[--tpos];
- h = balance_right(r);
+ h = tree_balance_right(r);
}
return h;
}
@@ -2448,11 +2614,13 @@ static int delsub(TreeDbTerm **this)
* Helper for db_slot
*/
-static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot)
+static TreeDbTerm *slot_search(Process *p, TreeDbTerm *root,
+ Sint slot, DbTable *tb,
+ DbTableTree *stack_container)
{
TreeDbTerm *this;
TreeDbTerm *tmp;
- DbTreeStack* stack = get_any_stack(tb);
+ DbTreeStack* stack = get_any_stack(tb,stack_container);
ASSERT(stack != NULL);
if (slot == 1) { /* Don't search from where we are if we are
@@ -2465,7 +2633,7 @@ static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot)
stack->pos = 0;
}
if (EMPTY_NODE(stack)) {
- this = tb->root;
+ this = root;
if (this == NULL)
goto done;
while (this->left != NULL){
@@ -2514,7 +2682,7 @@ static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot)
}
}
done:
- release_stack(tb,stack);
+ release_stack(tb,stack_container,stack);
return this;
}
@@ -2522,7 +2690,8 @@ done:
* Find next and previous in sort order
*/
-static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
+static TreeDbTerm *find_next(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key) {
TreeDbTerm *this;
TreeDbTerm *tmp;
Sint c;
@@ -2534,7 +2703,7 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
}
}
if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
@@ -2578,7 +2747,8 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
return this;
}
-static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
+static TreeDbTerm *find_prev(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key) {
TreeDbTerm *this;
TreeDbTerm *tmp;
Sint c;
@@ -2590,7 +2760,7 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
}
}
if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
@@ -2634,8 +2804,8 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
return this;
}
-static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
- Eterm key)
+static TreeDbTerm *find_next_from_pb_key(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key)
{
TreeDbTerm *this;
TreeDbTerm *tmp;
@@ -2643,7 +2813,7 @@ static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
/* spool the stack, we have to "re-search" */
stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
@@ -2667,8 +2837,8 @@ static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
}
}
-static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
- Eterm key)
+static TreeDbTerm *find_prev_from_pb_key(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key)
{
TreeDbTerm *this;
TreeDbTerm *tmp;
@@ -2676,7 +2846,7 @@ static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
/* spool the stack, we have to "re-search" */
stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
@@ -2704,16 +2874,17 @@ static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
/*
* Just lookup a node
*/
-static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
+static TreeDbTerm *find_node(DbTableCommon *tb, TreeDbTerm *root,
+ Eterm key, DbTableTree *stack_container)
{
TreeDbTerm *this;
Sint res;
- DbTreeStack* stack = get_static_stack(tb);
+ DbTreeStack* stack = get_static_stack(stack_container);
if(!stack || EMPTY_NODE(stack)
|| !cmp_key_eq(tb, key, (this=TOP_NODE(stack)))) {
- this = tb->root;
+ this = root;
while (this != NULL && (res = cmp_key(tb,key,this)) != 0) {
if (res < 0)
this = this->left;
@@ -2722,7 +2893,7 @@ static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
}
}
if (stack) {
- release_stack(tb,stack);
+ release_stack((DbTable*)tb,stack_container,stack);
}
return this;
}
@@ -2730,12 +2901,12 @@ static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
/*
* Lookup a node and return the address of the node pointer in the tree
*/
-static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
+static TreeDbTerm **find_node2(DbTableCommon *tb, TreeDbTerm **root, Eterm key)
{
TreeDbTerm **this;
Sint res;
- this = &tb->root;
+ this = root;
while ((*this) != NULL && (res = cmp_key(tb, key, *this)) != 0) {
if (res < 0)
this = &((*this)->left);
@@ -2752,7 +2923,8 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
* Tries to reuse the existing stack for performance.
*/
-static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *this) {
+static TreeDbTerm **find_ptr(DbTableCommon *tb, TreeDbTerm **root,
+ DbTreeStack *stack, TreeDbTerm *this) {
Eterm key = GETKEY(tb, this->dbterm.tpl);
TreeDbTerm *tmp;
TreeDbTerm *parent;
@@ -2765,7 +2937,7 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th
}
}
if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
- if (( tmp = tb->root ) == NULL)
+ if (( tmp = *root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, tmp);
@@ -2791,7 +2963,7 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th
parent = TOPN_NODE(stack, 1);
if (parent == NULL)
- return ((this != tb->root) ? NULL : &(tb->root));
+ return ((this != *root) ? NULL : root);
if (parent->left == this)
return &(parent->left);
if (parent->right == this)
@@ -2799,12 +2971,11 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th
return NULL;
}
-static int
-db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
- DbUpdateHandle* handle)
+int db_lookup_dbterm_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm obj, DbUpdateHandle* handle,
+ DbTableTree *stack_container)
{
- DbTableTree *tb = &tbl->tree;
- TreeDbTerm **pp = find_node2(tb, key);
+ TreeDbTerm **pp = find_node2(&tbl->common, root, key);
int flags = 0;
if (pp == NULL) {
@@ -2815,18 +2986,19 @@ db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
int arity = arityval(*objp);
Eterm *htop, *hend;
- ASSERT(arity >= tb->common.keypos);
+ ASSERT(arity >= tbl->common.keypos);
htop = HAlloc(p, arity + 1);
hend = htop + arity + 1;
sys_memcpy(htop, objp, sizeof(Eterm) * (arity + 1));
- htop[tb->common.keypos] = key;
+ htop[tbl->common.keypos] = key;
obj = make_tuple(htop);
- if (db_put_tree(tbl, obj, 1) != DB_ERROR_NONE) {
+ if (db_put_tree_common(&tbl->common, root,
+ obj, 1, stack_container) != DB_ERROR_NONE) {
return 0;
}
- pp = find_node2(tb, key);
+ pp = find_node2(&tbl->common, root, key);
ASSERT(pp != NULL);
HRelease(p, hend, htop);
flags |= DB_NEW_OBJECT;
@@ -2841,21 +3013,28 @@ db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
return 1;
}
-static void
-db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
+static int
+db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle)
{
- DbTable *tbl = handle->tb;
DbTableTree *tb = &tbl->tree;
+ return db_lookup_dbterm_tree_common(p, tbl, &tb->root, key, obj, handle, tb);
+}
+
+void db_finalize_dbterm_tree_common(int cret, DbUpdateHandle *handle,
+ DbTableTree *stack_container)
+{
+ DbTable *tbl = handle->tb;
TreeDbTerm *bp = (TreeDbTerm *) *handle->bp;
if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
Eterm ret;
- db_erase_tree(tbl, GETKEY(tb, bp->dbterm.tpl), &ret);
+ db_erase_tree(tbl, GETKEY(&tbl->common, bp->dbterm.tpl), &ret);
} else if (handle->flags & DB_MUST_RESIZE) {
db_finalize_resize(handle, offsetof(TreeDbTerm,dbterm));
- reset_static_stack(tb);
+ reset_static_stack(stack_container);
- free_term(tb, bp);
+ free_term(tbl, bp);
}
#ifdef DEBUG
handle->dbterm = 0;
@@ -2863,13 +3042,22 @@ db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
return;
}
+static void
+db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
+{
+ DbTable *tbl = handle->tb;
+ DbTableTree *tb = &tbl->tree;
+ db_finalize_dbterm_tree_common(cret, handle, tb);
+}
+
/*
* Traverse the tree with a callback function, used by db_match_xxx
*/
-static void traverse_backwards(DbTableTree *tb,
+static void traverse_backwards(DbTableCommon *tb,
+ TreeDbTerm **root,
DbTreeStack* stack,
Eterm lastkey,
- int (*doit)(DbTableTree *,
+ int (*doit)(DbTableCommon *,
TreeDbTerm *,
void *,
int),
@@ -2879,7 +3067,7 @@ static void traverse_backwards(DbTableTree *tb,
if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL) {
+ if (( this = *root ) == NULL) {
return;
}
while (this != NULL) {
@@ -2887,15 +3075,15 @@ static void traverse_backwards(DbTableTree *tb,
this = this->right;
}
this = TOP_NODE(stack);
- next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ next = find_prev(tb, *root, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 0)))
return;
} else {
- next = find_prev(tb, stack, lastkey);
+ next = find_prev(tb, *root, stack, lastkey);
}
while ((this = next) != NULL) {
- next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ next = find_prev(tb, *root, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 0)))
return;
}
@@ -2904,10 +3092,11 @@ static void traverse_backwards(DbTableTree *tb,
/*
* Traverse the tree with a callback function, used by db_match_xxx
*/
-static void traverse_forward(DbTableTree *tb,
+static void traverse_forward(DbTableCommon *tb,
+ TreeDbTerm **root,
DbTreeStack* stack,
Eterm lastkey,
- int (*doit)(DbTableTree *,
+ int (*doit)(DbTableCommon *,
TreeDbTerm *,
void *,
int),
@@ -2917,7 +3106,7 @@ static void traverse_forward(DbTableTree *tb,
if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL) {
+ if (( this = *root ) == NULL) {
return;
}
while (this != NULL) {
@@ -2925,15 +3114,15 @@ static void traverse_forward(DbTableTree *tb,
this = this->left;
}
this = TOP_NODE(stack);
- next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ next = find_next(tb, *root, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 1)))
return;
} else {
- next = find_next(tb, stack, lastkey);
+ next = find_next(tb, *root, stack, lastkey);
}
while ((this = next) != NULL) {
- next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ next = find_next(tb, *root, stack, GETKEY(tb, this->dbterm.tpl));
if (!((*doit)(tb, this, context, 1)))
return;
}
@@ -2942,10 +3131,11 @@ static void traverse_forward(DbTableTree *tb,
/*
* Traverse the tree with an update callback function, used by db_select_replace
*/
-static void traverse_update_backwards(DbTableTree *tb,
+static void traverse_update_backwards(DbTableCommon *tb,
+ TreeDbTerm **root,
DbTreeStack* stack,
Eterm lastkey,
- int (*doit)(DbTableTree*,
+ int (*doit)(DbTableCommon*,
TreeDbTerm**,
void*,
int),
@@ -2956,7 +3146,7 @@ static void traverse_update_backwards(DbTableTree *tb,
if (lastkey == THE_NON_VALUE) {
stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL) {
+ if (( this = *root ) == NULL) {
return;
}
while (this != NULL) {
@@ -2964,23 +3154,23 @@ static void traverse_update_backwards(DbTableTree *tb,
this = this->right;
}
this = TOP_NODE(stack);
- this_ptr = find_ptr(tb, stack, this);
+ this_ptr = find_ptr(tb, root, stack, this);
ASSERT(this_ptr != NULL);
res = (*doit)(tb, this_ptr, context, 0);
REPLACE_TOP_NODE(stack, *this_ptr);
- next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ next = find_prev(tb, *root, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
if (!res)
return;
} else {
- next = find_prev(tb, stack, lastkey);
+ next = find_prev(tb, *root, stack, lastkey);
}
while ((this = next) != NULL) {
- this_ptr = find_ptr(tb, stack, this);
+ this_ptr = find_ptr(tb, root, stack, this);
ASSERT(this_ptr != NULL);
res = (*doit)(tb, this_ptr, context, 0);
REPLACE_TOP_NODE(stack, *this_ptr);
- next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ next = find_prev(tb, *root, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
if (!res)
return;
}
@@ -2990,8 +3180,8 @@ static void traverse_update_backwards(DbTableTree *tb,
* Returns 0 if not given 1 if given and -1 on no possible match
* if key is given; *ret is set to point to the object concerned.
*/
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
- Eterm *partly_bound)
+static int key_given(DbTableCommon *tb, TreeDbTerm **root, Eterm pattern,
+ TreeDbTerm ***ret, Eterm *partly_bound)
{
TreeDbTerm **this;
Eterm key;
@@ -2999,11 +3189,11 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
ASSERT(ret != NULL);
if (pattern == am_Underscore || db_is_variable(pattern) != -1)
return 0;
- key = db_getkey(tb->common.keypos, pattern);
+ key = db_getkey(tb->keypos, pattern);
if (is_non_value(key))
return -1; /* can't possibly match anything */
if (!db_has_variable(key)) { /* Bound key */
- if (( this = find_node2(tb, key) ) == NULL) {
+ if (( this = find_node2(tb, root, key) ) == NULL) {
return -1;
}
*ret = this;
@@ -3296,7 +3486,7 @@ static int do_partly_bound_can_match_greater(Eterm a, Eterm b,
* Callback functions for the different match functions
*/
-static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select(DbTableCommon *tb, TreeDbTerm *this, void *ptr,
int forward)
{
struct select_context *sc = (struct select_context *) ptr;
@@ -3314,7 +3504,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0))) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(tb, sc->p,sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
sc->accum = CONS(hp, ret, sc->accum);
}
@@ -3331,7 +3521,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
-static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select_count(DbTableCommon *tb, TreeDbTerm *this, void *ptr,
int forward)
{
struct select_count_context *sc = (struct select_count_context *) ptr;
@@ -3345,7 +3535,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
++(sc->got);
}
@@ -3355,7 +3545,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
-static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select_chunk(DbTableCommon *tb, TreeDbTerm *this, void *ptr,
int forward)
{
struct select_context *sc = (struct select_context *) ptr;
@@ -3374,7 +3564,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
++(sc->got);
sc->accum = CONS(hp, ret, sc->accum);
@@ -3393,7 +3583,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
}
-static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select_delete(DbTableCommon *tb, TreeDbTerm *this, void *ptr,
int forward)
{
struct select_delete_context *sc = (struct select_delete_context *) ptr;
@@ -3401,7 +3591,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
Eterm key;
if (sc->erase_lastterm)
- free_term(tb, sc->lastterm);
+ free_term((DbTable*)tb, sc->lastterm);
sc->erase_lastterm = 0;
sc->lastterm = this;
@@ -3409,10 +3599,10 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
cmp_partly_bound(sc->end_condition,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)
return 0;
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
key = GETKEY(sc->tb, this->dbterm.tpl);
- linkout_tree(sc->tb, key);
+ linkout_tree(sc->tb, sc->root, key, sc->stack);
sc->erase_lastterm = 1;
++sc->accum;
}
@@ -3422,7 +3612,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
-static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
+static int doit_select_replace(DbTableCommon *tb, TreeDbTerm **this, void *ptr,
int forward)
{
struct select_replace_context *sc = (struct select_replace_context *) ptr;
@@ -3436,13 +3626,13 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &(*this)->dbterm, NULL, 0);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &(*this)->dbterm, NULL, 0);
if (is_value(ret)) {
TreeDbTerm* new;
TreeDbTerm* old = *this;
#ifdef DEBUG
- Eterm key = db_getkey(tb->common.keypos, ret);
+ Eterm key = db_getkey(tb->keypos, ret);
ASSERT(is_value(key));
ASSERT(cmp_key(tb, key, old) == 0);
#endif
@@ -3452,7 +3642,7 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
new->balance = old->balance;
sc->lastobj = new->dbterm.tpl;
*this = new;
- free_term(tb, old);
+ free_term((DbTable*)tb, old);
++(sc->replaced);
}
if (--(sc->max) <= 0) {
@@ -3462,7 +3652,7 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
}
#ifdef TREE_DEBUG
-static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
+static void do_dump_tree2(DbTableCommon* tb, int to, void *to_arg, int show,
TreeDbTerm *t, int offset)
{
if (t == NULL)
@@ -3471,7 +3661,7 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
if (show) {
const char* prefix;
Eterm term;
- if (tb->common.compress) {
+ if (tb->compress) {
prefix = "key=";
term = GETKEY(tb, t->dbterm.tpl);
}
@@ -3526,7 +3716,7 @@ static void check_slot_pos(DbTableTree *tb)
"element position %d is really 0x%08X, when stack says "
"it's 0x%08X\n", tb->stack.slot, t,
tb->stack.array[tb->stack.pos - 1]);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
}
}
@@ -3541,14 +3731,14 @@ static void check_saved_stack(DbTableTree *tb)
if (t != stack->array[0]) {
erts_fprintf(stderr,"tb->stack[0] is 0x%08X, should be 0x%08X\n",
stack->array[0], t);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
return;
}
while (n < stack->pos) {
if (t == NULL) {
erts_fprintf(stderr, "NULL pointer in tree when stack not empty,"
" stack depth is %d\n", n);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
return;
}
n++;
@@ -3562,7 +3752,7 @@ static void check_saved_stack(DbTableTree *tb)
"represent child pointer in tree!"
"(left == 0x%08X, right == 0x%08X\n",
n, tb->stack[n], t->left, t->right);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
return;
}
}
@@ -3581,7 +3771,7 @@ static int check_table_tree(DbTableTree* tb, TreeDbTerm *t)
erts_fprintf(stderr,"balance = %d, left = 0x%08X, right = 0x%08X\n",
t->balance, t->left, t->right);
erts_fprintf(stderr,"\nDump:\n---------------------------------\n");
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, t, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, t, 0);
erts_fprintf(stderr,"\n---------------------------------\n");
}
return ((rh > lh) ? rh : lh) + 1;
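
Note on the erl_db_tree.c hunks above: the AVL-tree operations are being split into *_common variants that take the root pointer, the DbTableCommon part and an optional DbTableTree stack container as explicit arguments, so that another tree-backed table type can run them on a sub-root of its own. A minimal sketch of such reuse, not part of the patch; my_table_find_root is a hypothetical helper, and NULL means the caller keeps no cached static stack:

    static int
    my_table_lookup_dbterm(Process *p, DbTable *tbl, Eterm key, Eterm obj,
                           DbUpdateHandle *handle)
    {
        /* Hypothetical: locate the sub-tree root that owns 'key'. */
        TreeDbTerm **root = my_table_find_root(tbl, key);
        /* No DbTableTree stack container is available for this table type. */
        return db_lookup_dbterm_tree_common(p, tbl, root, key, obj, handle, NULL);
    }
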
diff --git a/erts/emulator/beam/erl_db_tree_util.h b/erts/emulator/beam/erl_db_tree_util.h
new file mode 100644
index 0000000000..fbd9a9124a
--- /dev/null
+++ b/erts/emulator/beam/erl_db_tree_util.h
@@ -0,0 +1,151 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _DB_TREE_UTIL_H
+#define _DB_TREE_UTIL_H
+
+/*
+** Internal functions and macros used by both the CA tree and the AVL tree
+*/
+
+/*
+** A stack of this size is enough for an AVL tree with more than
+** 0xFFFFFFFF elements. May be subject to change if
+** the datatype of the element counter is changed to a 64 bit integer.
+** The Maximal height of an AVL tree is calculated as:
+** h(n) <= 1.4404 * log(n + 2) - 0.328
+** Where n denotes the number of nodes, h(n) the height of the tree
+** with n nodes and log is the binary logarithm.
+*/
+
+#define STACK_NEED 50
+
+#define PUSH_NODE(Dtt, Tdt) \
+ ((Dtt)->array[(Dtt)->pos++] = Tdt)
+
+#define POP_NODE(Dtt) \
+ (((Dtt)->pos) ? \
+ (Dtt)->array[--((Dtt)->pos)] : NULL)
+
+#define TOP_NODE(Dtt) \
+ ((Dtt->pos) ? \
+ (Dtt)->array[(Dtt)->pos - 1] : NULL)
+
+#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
+
+static ERTS_INLINE void free_term(DbTable *tb, TreeDbTerm* p)
+{
+ db_free_term(tb, p, offsetof(TreeDbTerm, dbterm));
+}
+
+/*
+** Some macros for "direction stacks"
+*/
+#define DIR_LEFT 0
+#define DIR_RIGHT 1
+#define DIR_END 2
+
+static ERTS_INLINE Sint cmp_key(DbTableCommon* tb, Eterm key, TreeDbTerm* obj) {
+ return CMP(key, GETKEY(tb,obj->dbterm.tpl));
+}
+
+int tree_balance_left(TreeDbTerm **this);
+int tree_balance_right(TreeDbTerm **this);
+
+int db_first_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container);
+int db_next_tree_common(Process *p, DbTable *tbl,
+ TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack);
+int db_last_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container);
+int db_prev_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack);
+int db_put_tree_common(DbTableCommon *tb, TreeDbTerm **root, Eterm obj,
+ int key_clash_fail, DbTableTree *stack_container);
+int db_get_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTableTree *stack_container);
+int db_get_element_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ int ndex, Eterm *ret, DbTableTree *stack_container);
+int db_member_tree_common(DbTableCommon *tb, TreeDbTerm *root, Eterm key, Eterm *ret,
+ DbTableTree *stack_container);
+int db_erase_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */);
+int db_erase_object_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm object,
+ Eterm *ret, DbTableTree *stack_container);
+int db_slot_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm slot_term, Eterm *ret,
+ DbTableTree *stack_container);
+int db_select_chunk_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret,
+ DbTableTree *stack_container);
+int db_select_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, int reverse, Eterm *ret,
+ DbTableTree *stack_container);
+int db_select_delete_tree_common(Process *p, DbTable *tbl,
+ TreeDbTerm **root,
+ Eterm tid, Eterm pattern,
+ Eterm *ret,
+ DbTreeStack* stack);
+int db_select_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container);
+int db_select_delete_continue_tree_common(Process *p,
+ DbTable *tbl,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTreeStack* stack);
+int db_select_count_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container);
+int db_select_count_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container);
+int db_select_replace_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm **root,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container);
+int db_select_replace_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ TreeDbTerm **root,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container);
+int db_take_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */);
+void db_print_tree_common(fmtfn_t to, void *to_arg,
+ int show, TreeDbTerm *root, DbTable *tbl);
+void db_foreach_offheap_tree_common(TreeDbTerm *root,
+ void (*func)(ErlOffHeap *, void *),
+ void * arg);
+int db_lookup_dbterm_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm obj, DbUpdateHandle* handle,
+ DbTableTree *stack_container);
+void db_finalize_dbterm_tree_common(int cret, DbUpdateHandle *handle,
+ DbTableTree *stack_container);
+#endif /* _DB_TREE_UTIL_H */
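
A quick check of the STACK_NEED comment in this new header (editorial, not in the patch): with n = 2^32 - 1 elements, the stated bound gives h(n) <= 1.4404 * log2(2^32 + 1) - 0.328 ≈ 1.4404 * 32 - 0.328 ≈ 45.8, so 50 stack slots indeed cover any AVL tree whose element counter fits in 32 bits; a 64-bit counter would need roughly 1.4404 * 64 ≈ 92 slots, which is what the "may be subject to change" remark refers to.
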
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 6ec3b4f98f..be74bc795c 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -90,6 +90,8 @@ typedef struct {
Uint new_size;
int flags;
void* lck;
+ void* lck2;
+ int current_level;
} DbUpdateHandle;
@@ -274,23 +276,26 @@ typedef struct db_table_common {
} DbTableCommon;
/* These are status bit patterns */
-#define DB_PRIVATE (1 << 0)
-#define DB_PROTECTED (1 << 1)
-#define DB_PUBLIC (1 << 2)
-#define DB_DELETE (1 << 3) /* table is being deleted */
-#define DB_SET (1 << 4)
-#define DB_BAG (1 << 5)
-#define DB_DUPLICATE_BAG (1 << 6)
-#define DB_ORDERED_SET (1 << 7)
-#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */
-#define DB_FREQ_READ (1 << 9) /* read_concurrency */
-#define DB_NAMED_TABLE (1 << 10)
-#define DB_BUSY (1 << 11)
+#define DB_PRIVATE (1 << 0)
+#define DB_PROTECTED (1 << 1)
+#define DB_PUBLIC (1 << 2)
+#define DB_DELETE (1 << 3) /* table is being deleted */
+#define DB_SET (1 << 4)
+#define DB_BAG (1 << 5)
+#define DB_DUPLICATE_BAG (1 << 6)
+#define DB_ORDERED_SET (1 << 7)
+#define DB_CA_ORDERED_SET (1 << 8)
+#define DB_FINE_LOCKED (1 << 9) /* write_concurrency */
+#define DB_FREQ_READ (1 << 10) /* read_concurrency */
+#define DB_NAMED_TABLE (1 << 11)
+#define DB_BUSY (1 << 12)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
+#define IS_CATREE_TABLE(Status) (!!((Status) & \
+ DB_CA_ORDERED_SET))
#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
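
The renumbered status bits above insert DB_CA_ORDERED_SET next to the existing type bits. A small illustrative helper (not from the patch) showing how the three type-test macros partition the table kinds; the Uint32 parameter type is an assumption:

    /* Illustrative only: classify a table by its status bits. */
    static const char *table_kind(Uint32 status)
    {
        if (IS_HASH_TABLE(status))   return "set | bag | duplicate_bag";
        if (IS_TREE_TABLE(status))   return "ordered_set (AVL tree)";
        if (IS_CATREE_TABLE(status)) return "ordered_set (CA tree)";
        return "unknown";
    }
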
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 31b4817fb1..9ef7c39d41 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -53,7 +53,8 @@ typedef enum {
enum ErlNifSelectFlags {
ERL_NIF_SELECT_READ = (1 << 0),
ERL_NIF_SELECT_WRITE = (1 << 1),
- ERL_NIF_SELECT_STOP = (1 << 2)
+ ERL_NIF_SELECT_STOP = (1 << 2),
+ ERL_NIF_SELECT_CANCEL = (1 << 3)
};
/*
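
The new ERL_NIF_SELECT_CANCEL flag above suggests that a NIF can withdraw a previously requested notification by OR-ing it with ERL_NIF_SELECT_READ or ERL_NIF_SELECT_WRITE. A hedged sketch under that assumption (the exact semantics are not spelled out in this hunk); NULL selects the calling process, as in ordinary enif_select() calls:

    /* Sketch: cancel a pending read notification on 'event' owned by 'obj'. */
    static int cancel_read_select(ErlNifEnv *env, ErlNifEvent event, void *obj)
    {
        int rv = enif_select(env, event,
                             ERL_NIF_SELECT_READ | ERL_NIF_SELECT_CANCEL,
                             obj, NULL, enif_make_atom(env, "undefined"));
        /* A negative value reports an error; the ERL_NIF_SELECT_*_CANCELLED
         * return flags added to erl_nif.h further down are assumed to confirm
         * that a pending notification was dropped. */
        return rv;
    }
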
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 463ae898a3..3b10ef8c25 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -91,6 +91,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "db_tab", "address" },
{ "db_tab_fix", "address" },
{ "db_hash_slot", "address" },
+ { "erl_db_catree_base_node", "dynamic" },
+ { "erl_db_catree_route_node", "dynamic" },
{ "resource_monitors", "address" },
{ "driver_list", NULL },
{ "proc_msgq", "pid" },
@@ -707,6 +709,26 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
+int
+erts_lc_is_check_order(char *name)
+{
+ int i;
+ if (!name || name[0] == '\0')
+ erts_fprintf(stderr, "Missing lock name\n");
+
+ for (i = 0; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ if (sys_strcmp(erts_lock_order[i].name, name) == 0) {
+ if (erts_lock_order[i].internal_order != NULL &&
+ sys_strcmp(erts_lock_order[i].internal_order, "dynamic") == 0) {
+ return 0;
+ }else{
+ return 1;
+ }
+ }
+ }
+ return 1;
+}
+
static int compare_locked_by_id(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
if(locked_lock->id < comparand->id) {
@@ -987,15 +1009,17 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
*/
- /* Check that we are not trying to lock this lock twice */
- for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
- if (tl_lck->id < lck->id
- || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
- if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", thr, lck, options);
- break;
- }
- }
+ if (lck->check_order) {
+ /* Check that we are not trying to lock this lock twice */
+ for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
+ if (tl_lck->id < lck->id
+ || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
+ if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
+ lock_twice("Trylocking", thr, lck, options);
+ break;
+ }
+ }
+ }
#ifndef ERTS_LC_ALLWAYS_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
/* We only force busy if a lock order violation would occur
@@ -1044,7 +1068,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t
for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
- if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
+ if (tl_lck->id == lck->id && tl_lck->extra == lck->extra && lck->check_order)
lock_twice("Trylocking", thr, lck, options);
if (locked) {
ll->next = tl_lck->next;
@@ -1164,9 +1188,10 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE);
thr->matrix.m[lck->id][0] = 1;
}
- else if (thr->locked.last->id < lck->id
- || (thr->locked.last->id == lck->id
- && thr->locked.last->extra < lck->extra)) {
+ else if (( ! lck->check_order && thr->locked.last->id == lck->id) ||
+ (thr->locked.last->id < lck->id
+ || (thr->locked.last->id == lck->id
+ && thr->locked.last->extra < lck->extra))) {
lc_locked_lock_t* ll;
if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags)) {
type_order_violation("locking ", thr, lck);
@@ -1296,7 +1321,7 @@ void
erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags)
{
lck->id = erts_lc_get_lock_order_id(name);
-
+ lck->check_order = erts_lc_is_check_order(name);
lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
@@ -1308,6 +1333,7 @@ void
erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra)
{
lck->id = erts_lc_get_lock_order_id(name);
+ lck->check_order = erts_lc_is_check_order(name);
lck->extra = extra;
ASSERT(is_immed(lck->extra));
lck->flags = flags;
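
With the two catree lock names registered as "dynamic" in erts_lock_order and the new erts_lc_is_check_order() predicate above, locks in those classes opt out of the strict same-id ordering and duplicate-trylock checks. Expected classification, shown as assertions (editorial, not in the patch):

    /* "address"-ordered entries keep full checking ... */
    ASSERT(erts_lc_is_check_order("db_tab") == 1);
    /* ... while "dynamic" entries, such as the catree node locks, do not. */
    ASSERT(erts_lc_is_check_order("erl_db_catree_base_node") == 0);
    ASSERT(erts_lc_is_check_order("erl_db_catree_route_node") == 0);
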
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index d10e32985a..472e88ad65 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -46,6 +46,7 @@
typedef struct {
int inited;
Sint16 id;
+ int check_order;
erts_lock_flags_t flags;
erts_lock_options_t taken_options;
UWord extra;
@@ -53,11 +54,12 @@ typedef struct {
#define ERTS_LC_INITITALIZED 0x7f7f7f7f
-#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), 0, (X)}
+#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), 1, (F), 0, (X)}
void erts_lc_init(void);
void erts_lc_late_init(void);
Sint16 erts_lc_get_lock_order_id(char *name);
+int erts_lc_is_check_order(char *name);
void erts_lc_check(erts_lc_lock_t *have, int have_len,
erts_lc_lock_t *have_not, int have_not_len);
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
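
Cross-checking the widened initializer against the struct above (editorial note): the added literal 1 fills the new check_order member, so every statically initialized lock keeps ordinary order checking unless erts_lc_init_lock*() later clears it for a "dynamic" lock class. An equivalent designated-initializer view of what ERTS_LC_LOCK_INIT(ID, X, F) expands to:

    erts_lc_lock_t lck = {
        .inited        = ERTS_LC_INITITALIZED,
        .id            = (ID),
        .check_order   = 1,     /* new member: 1 = normal order checking */
        .flags         = (F),
        .taken_options = 0,
        .extra         = (X)
    };
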
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 4c09496ef1..c5227a0c23 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -160,6 +160,8 @@ typedef int ErlNifEvent;
#define ERL_NIF_SELECT_STOP_SCHEDULED (1 << 1)
#define ERL_NIF_SELECT_INVALID_EVENT (1 << 2)
#define ERL_NIF_SELECT_FAILED (1 << 3)
+#define ERL_NIF_SELECT_READ_CANCELLED (1 << 4)
+#define ERL_NIF_SELECT_WRITE_CANCELLED (1 << 5)
typedef enum
{
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 1f147011a8..f4dc60941a 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -60,6 +60,10 @@ static int references_atoms_need_init = 1;
static ErtsMonotonicTime orig_node_tab_delete_delay;
static ErtsMonotonicTime node_tab_delete_delay;
+
+static void report_gc_active_dist_entry(Eterm sysname, enum dist_entry_state);
+
+
/* -- The distribution table ---------------------------------------------- */
#define ErtsBin2DistEntry(B) \
@@ -366,31 +370,43 @@ DistEntry *erts_find_dist_entry(Eterm sysname)
}
DistEntry *
-erts_dhandle_to_dist_entry(Eterm dhandle)
+erts_dhandle_to_dist_entry(Eterm dhandle, Uint32 *conn_id)
{
+ Eterm *tpl;
Binary *bin;
- if (!is_internal_magic_ref(dhandle))
+
+ if (!is_boxed(dhandle))
+ return NULL;
+ tpl = boxed_val(dhandle);
+ if (tpl[0] != make_arityval(2) || !is_small(tpl[1])
+ || !is_internal_magic_ref(tpl[2]))
return NULL;
- bin = erts_magic_ref2bin(dhandle);
+ *conn_id = unsigned_val(tpl[1]);
+ bin = erts_magic_ref2bin(tpl[2]);
if (ERTS_MAGIC_BIN_DESTRUCTOR(bin) != erts_dist_entry_destructor)
return NULL;
return ErtsBin2DistEntry(bin);
}
Eterm
-erts_build_dhandle(Eterm **hpp, ErlOffHeap* ohp, DistEntry *dep)
+erts_build_dhandle(Eterm **hpp, ErlOffHeap* ohp,
+ DistEntry *dep, Uint32 conn_id)
{
Binary *bin = ErtsDistEntry2Bin(dep);
+ Eterm mref, dhandle;
ASSERT(bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dist_entry_destructor);
- return erts_mk_magic_ref(hpp, ohp, bin);
+ mref = erts_mk_magic_ref(hpp, ohp, bin);
+ dhandle = TUPLE2(*hpp, make_small(conn_id), mref);
+ *hpp += 3;
+ return dhandle;
}
Eterm
-erts_make_dhandle(Process *c_p, DistEntry *dep)
+erts_make_dhandle(Process *c_p, DistEntry *dep, Uint32 conn_id)
{
- Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
- return erts_build_dhandle(&hp, &c_p->off_heap, dep);
+ Eterm *hp = HAlloc(c_p, ERTS_DHANDLE_SIZE);
+ return erts_build_dhandle(&hp, &c_p->off_heap, dep, conn_id);
}
static void start_timer_delete_dist_entry(void *vdep);
@@ -451,6 +467,19 @@ static void try_delete_dist_entry(DistEntry* dep)
{
erts_aint_t refc;
+ erts_de_rwlock(dep);
+ if (dep->state != ERTS_DE_STATE_IDLE && de_refc_read(dep,0) == 0) {
+ Eterm sysname = dep->sysname;
+ enum dist_entry_state state = dep->state;
+
+ if (dep->state != ERTS_DE_STATE_PENDING)
+ ERTS_INTERNAL_ERROR("Garbage collecting connected distribution entry");
+ erts_abort_connection_rwunlock(dep);
+ report_gc_active_dist_entry(sysname, state);
+ }
+ else
+ erts_de_rwunlock(dep);
+
erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
/*
* Another thread might have looked up this dist entry after
@@ -477,6 +506,34 @@ static void try_delete_dist_entry(DistEntry* dep)
}
}
+static void report_gc_active_dist_entry(Eterm sysname,
+ enum dist_entry_state state)
+{
+ char *state_str;
+ erts_dsprintf_buf_t *dsbuf = erts_create_logger_dsbuf();
+ switch (state) {
+ case ERTS_DE_STATE_CONNECTED:
+ state_str = "connected";
+ break;
+ case ERTS_DE_STATE_PENDING:
+ state_str = "pending connect";
+ break;
+ case ERTS_DE_STATE_EXITING:
+ state_str = "exiting";
+ break;
+ case ERTS_DE_STATE_IDLE:
+ state_str = "idle";
+ break;
+ default:
+ state_str = "unknown";
+ break;
+ }
+ erts_dsprintf(dsbuf, "Garbage collecting distribution "
+ "entry for node %T in state: %s",
+ sysname, state_str);
+ erts_send_error_to_logger_nogl(dsbuf);
+}
+
int erts_dist_entry_destructor(Binary *bin)
{
DistEntry *dep = ErtsBin2DistEntry(bin);
@@ -582,7 +639,7 @@ erts_set_dist_entry_not_connected(DistEntry *dep)
if(dep->next)
dep->next->prev = dep->prev;
- dep->state = ERTS_DE_STATE_EXITING;
+ dep->state = ERTS_DE_STATE_IDLE;
dep->flags = 0;
dep->prev = NULL;
dep->cid = NIL;
@@ -1863,8 +1920,9 @@ setup_reference_table(void)
if (ohp)
insert_offheap(ohp, HEAP_REF, prt->common.id);
/* Insert controller */
- if (prt->dist_entry)
- insert_dist_entry(prt->dist_entry,
+ dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ if (dep)
+ insert_dist_entry(dep,
CTRL_REF,
prt->common.id,
0);
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 9a792b10b1..c44f1f8991 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -214,9 +214,10 @@ int erts_lc_is_de_rwlocked(DistEntry *);
int erts_lc_is_de_rlocked(DistEntry *);
#endif
int erts_dist_entry_destructor(Binary *bin);
-DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle);
-Eterm erts_build_dhandle(Eterm **hpp, ErlOffHeap*, DistEntry*);
-Eterm erts_make_dhandle(Process *c_p, DistEntry *dep);
+DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle, Uint32* connection_id);
+#define ERTS_DHANDLE_SIZE (3+ERTS_MAGIC_REF_THING_SIZE)
+Eterm erts_build_dhandle(Eterm **hpp, ErlOffHeap*, DistEntry*, Uint32 conn_id);
+Eterm erts_make_dhandle(Process *c_p, DistEntry*, Uint32 conn_id);
ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep);
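
Heap-size note for the new dhandle format (editorial): the handle is now a 2-tuple {ConnId, MagicRef}, so ERTS_DHANDLE_SIZE is 3 words for the tuple (arity header plus two element slots) on top of the magic-ref thing; the connection id is an immediate small and needs no extra heap. A sketch of the matching decode step, mirroring erts_dhandle_to_dist_entry() above; 'dhandle' is an assumed input term, and the stale-connection reading of a mismatching id follows the check added to external.c further down:

    Uint32 conn_id;
    DistEntry *dep = erts_dhandle_to_dist_entry(dhandle, &conn_id);
    if (!dep) {
        /* not a {ConnId, MagicRef} dhandle (or wrong magic binary) */
    } else if (conn_id != dep->connection_id) {
        /* handle refers to an older connection instance to the same node */
    }
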
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 9b52b648e5..2be0a5bf74 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -112,8 +112,10 @@ typedef struct line_buf { /* Buffer used in line oriented I/O */
*/
#define ERTS_PRTSD_SCHED_ID 0
+#define ERTS_PRTSD_DIST_ENTRY 1
+#define ERTS_PRTSD_CONN_ID 2
-#define ERTS_PRTSD_SIZE 1
+#define ERTS_PRTSD_SIZE 3
typedef struct {
void *data[ERTS_PRTSD_SIZE];
@@ -154,7 +156,6 @@ struct _erl_drv_port {
Uint bytes_out; /* Number of bytes written */
ErlPortIOQueue ioq; /* driver accessible i/o queue */
- DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
char *name; /* String used in the open */
erts_driver_t* drv_ptr;
UWord drv_data;
@@ -257,6 +258,8 @@ ERTS_GLB_INLINE void *
erts_prtsd_get(Port *prt, int ix)
{
ErtsPrtSD *psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
+
+ ASSERT((unsigned)ix < ERTS_PRTSD_SIZE);
if (!psd)
return NULL;
ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
@@ -272,6 +275,7 @@ erts_prtsd_set(Port *prt, int ix, void *data)
psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
+ ASSERT((unsigned)ix < ERTS_PRTSD_SIZE);
if (psd) {
#ifdef ETHR_ORDERED_READ_DEPEND
ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
@@ -459,7 +463,7 @@ erts_port_unlock(Port *prt)
ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
#define ERTS_PORT_SCHED_ID(P, ID) \
- ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID)))
+ ((Uint) (UWord) erts_prtsd_set((P), ERTS_PRTSD_SCHED_ID, (void *) (UWord) (ID)))
extern const Port erts_invalid_port;
#define ERTS_PORT_LOCK_BUSY ((Port *) &erts_invalid_port)
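
With the dist_entry member removed from struct _erl_drv_port, distribution state hangs off the new port-specific-data slots instead. A minimal sketch of the store/fetch pattern: the fetch side matches the io.c hunks further down, while the store side is assumed to happen when a port is set up as a distribution controller, which is outside the hunks shown here:

    /* Store (assumed setup path): */
    erts_prtsd_set(prt, ERTS_PRTSD_DIST_ENTRY, (void *) dep);
    erts_prtsd_set(prt, ERTS_PRTSD_CONN_ID, (void *) (UWord) conn_id);

    /* Fetch (as in driver_output2()/driver_output_binary() below): */
    DistEntry *dep2  = (DistEntry *) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
    Uint32 conn_id2  = (Uint32) (UWord) erts_prtsd_get(prt, ERTS_PRTSD_CONN_ID);
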
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 3aa04d825e..a21acb9a57 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -9621,7 +9621,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (state & ERTS_PSFLG_RUNNING_SYS) {
if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) {
int local_only = (!!(p->flags & F_LOCAL_SIGS_ONLY)
- & !(state & ERTS_PSFLG_SUSPENDED));
+ & !(state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLGS_DIRTY_WORK)));
if (!local_only | !!(state & ERTS_PSFLG_SIG_Q)) {
int sig_reds;
/*
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 904993ceb6..621ba108ba 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -671,30 +671,37 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
byte *ext,
Uint size,
DistEntry *dep,
- ErtsAtomCache *cache,
- Uint32 *connection_id)
+ Uint32 conn_id,
+ ErtsAtomCache *cache)
{
-#undef ERTS_EXT_FAIL
-#undef ERTS_EXT_HDR_FAIL
-#if 1
-#define ERTS_EXT_FAIL goto fail
-#define ERTS_EXT_HDR_FAIL goto bad_hdr
-#else
-#define ERTS_EXT_FAIL abort()
-#define ERTS_EXT_HDR_FAIL abort()
-#endif
+ register byte *ep;
+
+ edep->heap_size = -1;
+ edep->flags = 0;
+ edep->dep = dep;
+
+ ASSERT(dep);
+ erts_de_rlock(dep);
- register byte *ep = ext;
ASSERT(dep->flags & DFLAG_UTF8_ATOMS);
- edep->heap_size = -1;
- edep->ext_endp = ext+size;
+ if ((dep->state != ERTS_DE_STATE_CONNECTED &&
+ dep->state != ERTS_DE_STATE_PENDING)
+ || dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ return ERTS_PREP_DIST_EXT_CLOSED;
+ }
- if (size < 2)
- ERTS_EXT_FAIL;
+ if (!(dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)) {
+ /* Skip PASS_THROUGH */
+ ext++;
+ size--;
+ }
+ edep->ext_endp = ext + size;
+ ep = ext;
- if (!dep)
- ERTS_INTERNAL_ERROR("Invalid use");
+ if (size < 2)
+ goto fail;
if (ep[0] != VERSION_MAGIC) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
@@ -703,28 +710,17 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
"channel %d\n",
dist_entry_channel_no(dep));
erts_send_error_to_logger_nogl(dsbufp);
- ERTS_EXT_FAIL;
+ goto fail;
}
- edep->flags = 0;
- edep->dep = dep;
-
- erts_de_rlock(dep);
-
- if (dep->state != ERTS_DE_STATE_CONNECTED &&
- dep->state != ERTS_DE_STATE_PENDING) {
- erts_de_runlock(dep);
- return ERTS_PREP_DIST_EXT_CLOSED;
- }
if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
edep->flags |= ERTS_DIST_EXT_DFLAG_HDR;
- *connection_id = dep->connection_id;
edep->connection_id = dep->connection_id;
if (ep[1] != DIST_HEADER) {
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
edep->attab.size = 0;
edep->extp = ext;
}
@@ -733,17 +729,17 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
int no_atoms;
if (!(edep->flags & ERTS_DIST_EXT_DFLAG_HDR))
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
#undef CHKSIZE
#define CHKSIZE(SZ) \
- do { if ((SZ) > edep->ext_endp - ep) ERTS_EXT_HDR_FAIL; } while(0)
+ do { if ((SZ) > edep->ext_endp - ep) goto bad_hdr; } while(0)
CHKSIZE(1+1+1);
ep += 2;
no_atoms = (int) get_int8(ep);
if (no_atoms < 0 || ERTS_ATOM_CACHE_SIZE < no_atoms)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep++;
if (no_atoms) {
int long_atoms = 0;
@@ -821,18 +817,18 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
/* atom already cached */
cix += (int) get_int8(ep);
if (cix >= ERTS_ATOM_CACHE_SIZE)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep++;
atom = cache->in_arr[cix];
if (!is_atom(atom))
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
edep->attab.atom[tix] = atom;
}
else {
/* new cached atom */
cix += (int) get_int8(ep);
if (cix >= ERTS_ATOM_CACHE_SIZE)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep++;
if (long_atoms) {
CHKSIZE(2);
@@ -850,7 +846,7 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
ERTS_ATOM_ENC_UTF8,
0);
if (is_non_value(atom))
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep += len;
cache->in_arr[cix] = atom;
edep->attab.atom[tix] = atom;
@@ -870,12 +866,12 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
edep->extp = ep;
#ifdef ERTS_DEBUG_USE_DIST_SEP
if (*ep != VERSION_MAGIC)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
#endif
}
#ifdef ERTS_DEBUG_USE_DIST_SEP
if (*ep != VERSION_MAGIC)
- ERTS_EXT_FAIL;
+ goto fail;
#endif
erts_de_runlock(dep);
@@ -883,8 +879,6 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
return ERTS_PREP_DIST_EXT_SUCCESS;
#undef CHKSIZE
-#undef ERTS_EXT_FAIL
-#undef ERTS_EXT_HDR_FAIL
bad_hdr: {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
@@ -901,7 +895,7 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
}
fail: {
erts_de_runlock(dep);
- erts_kill_dist_connection(dep, *connection_id);
+ erts_kill_dist_connection(dep, conn_id);
}
return ERTS_PREP_DIST_EXT_FAILED;
}
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index bbd9b4bad2..edac177cc6 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -182,7 +182,7 @@ void erts_destroy_dist_ext_copy(ErtsDistExternal *);
#define ERTS_PREP_DIST_EXT_CLOSED (1)
int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint,
- DistEntry *, ErtsAtomCache *, Uint32 *);
+ DistEntry *, Uint32 conn_id, ErtsAtomCache *);
Sint erts_decode_dist_ext_size(ErtsDistExternal *);
Eterm erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *);
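
erts_prepare_dist_ext() now takes the expected connection id as an input and verifies it against dep->connection_id under the read lock, instead of reporting the current id back through a pointer. A hedged sketch of the updated calling convention (the caller itself is outside the hunks shown here):

    ErtsDistExternal ede;
    switch (erts_prepare_dist_ext(&ede, buf, size, dep, conn_id, cache)) {
    case ERTS_PREP_DIST_EXT_CLOSED:
        /* connection gone or conn_id stale: silently drop the data */
        break;
    case ERTS_PREP_DIST_EXT_FAILED:
        /* malformed data; the connection was killed with conn_id */
        break;
    case ERTS_PREP_DIST_EXT_SUCCESS:
        /* continue with erts_decode_dist_ext_size() / erts_decode_dist_ext() */
        break;
    }
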
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 2cf268162d..21ae205237 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1083,7 +1083,7 @@ extern int erts_do_net_exits(DistEntry*, Eterm);
extern int distribution_info(fmtfn_t, void *);
extern int is_node_name_atom(Eterm a);
-extern int erts_net_message(Port *, DistEntry *,
+extern int erts_net_message(Port *, DistEntry *, Uint32 conn_id,
byte *, ErlDrvSizeT, byte *, ErlDrvSizeT);
extern void init_dist(void);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 133ab485d9..5325480901 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -375,7 +375,6 @@ static Port *create_port(char *name,
prt->control_flags = 0;
prt->bytes_in = 0;
prt->bytes_out = 0;
- prt->dist_entry = NULL;
ERTS_PORT_INIT_CONNECTED(prt, pid);
prt->common.u.alive.reg = NULL;
ERTS_PTMR_INIT(prt);
@@ -3613,12 +3612,12 @@ terminate_port(Port *prt)
erts_cleanup_port_data(prt);
+ ASSERT(erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY) == NULL);
+
psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
if (psd)
erts_free(ERTS_ALC_T_PRTSD, psd);
- ASSERT(prt->dist_entry == NULL);
-
kill_port(prt);
/*
@@ -3759,10 +3758,12 @@ erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed,
DRV_MONITOR_UNLOCK_PDL(prt);
}
- if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && prt->dist_entry) {
- erts_do_net_exits(prt->dist_entry, modified_reason);
- erts_deref_dist_entry(prt->dist_entry);
- prt->dist_entry = NULL;
+ if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ ASSERT(dep);
+ erts_do_net_exits(dep, modified_reason);
+ erts_deref_dist_entry(dep);
+ erts_prtsd_set(prt, ERTS_PRTSD_DIST_ENTRY, NULL);
erts_atomic32_read_band_relb(&prt->state,
~ERTS_PORT_SFLG_DISTRIBUTION);
}
@@ -5050,7 +5051,7 @@ set_busy_port(ErlDrvPort dprt, int on)
DTRACE1(port_not_busy, port_str);
}
#endif
- if (prt->dist_entry) {
+ if (erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY) != NULL) {
/*
* Processes suspended on distribution ports are
* normally queued on the dist entry.
@@ -6169,9 +6170,12 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
else
erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len));
if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
- erts_atomic64_inc_nob(&prt->dist_entry->in);
+ DistEntry* dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ Uint32 conn_id = (Uint32)(UWord) erts_prtsd_get(prt, ERTS_PRTSD_CONN_ID);
+ erts_atomic64_inc_nob(&dep->in);
return erts_net_message(prt,
- prt->dist_entry,
+ dep,
+ conn_id,
(byte*) hbuf, hlen,
(byte*) (bin->orig_bytes+offs), len);
}
@@ -6210,15 +6214,19 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
else
erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len));
if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
- erts_atomic64_inc_nob(&prt->dist_entry->in);
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ Uint32 conn_id = (Uint32)(UWord) erts_prtsd_get(prt, ERTS_PRTSD_CONN_ID);
+ erts_atomic64_inc_nob(&dep->in);
if (len == 0)
return erts_net_message(prt,
- prt->dist_entry,
+ dep,
+ conn_id,
NULL, 0,
(byte*) hbuf, hlen);
else
return erts_net_message(prt,
- prt->dist_entry,
+ dep,
+ conn_id,
(byte*) hbuf, hlen,
(byte*) buf, len);
}
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 91381bd60d..c9f7006384 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -624,6 +624,26 @@ static size_t my_strnlen(const char *s, size_t maxlen)
# define VALGRIND_MAKE_MEM_DEFINED(ptr,size)
#endif
+#ifndef __WIN32__
+/* Calculate CMSG_NXTHDR without having a struct msghdr*.
+ * CMSG_LEN only caters for alignment for start of data.
+ * To get how much to advance we need to use CMSG_SPACE
+ * on the payload length. To get the payload length we
+ * take the calculated cmsg->cmsg_len and subtract the
+ * header length. To get the header length we use
+ * the pointer difference from the cmsg start pointer
+ * to the CMSG_DATA(cmsg) pointer.
+ */
+#define LEN_CMSG_DATA(cmsg) \
+ ((cmsg)->cmsg_len - ((char*)CMSG_DATA(cmsg) - (char*)(cmsg)))
+#define NXT_CMSG_HDR(cmsg) \
+ ((struct cmsghdr*)(((char*)(cmsg)) + CMSG_SPACE(LEN_CMSG_DATA(cmsg))))
+#endif
+
+#if !defined(IPV6_PKTOPTIONS) && defined(IPV6_2292PKTOPTIONS)
+#define IPV6_PKTOPTIONS IPV6_2292PKTOPTIONS
+#endif
+
/*
Magic errno value used locally for return of {error, system_limit}
- the emulator definition of SYSTEM_LIMIT is not available here.
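
A worked example for the LEN_CMSG_DATA/NXT_CMSG_HDR macros above (editorial, not in the patch): a control message carrying a one-byte IP_TOS value has cmsg_len == CMSG_LEN(1), so LEN_CMSG_DATA() yields 1 and NXT_CMSG_HDR() advances by CMSG_SPACE(1), i.e. the aligned header plus the payload rounded up to the platform's cmsg alignment. A sketch of walking a raw control buffer without a struct msghdr, which is what these macros make possible; cmsg_buf and cmsg_buf_len are assumed inputs:

    struct cmsghdr *cmsg = (struct cmsghdr *) cmsg_buf;
    char *end = cmsg_buf + cmsg_buf_len;
    while ((char *) cmsg + sizeof(*cmsg) <= end &&
           (char *) cmsg + cmsg->cmsg_len <= end) {
        /* ... inspect cmsg->cmsg_level / cmsg->cmsg_type / CMSG_DATA(cmsg) ... */
        cmsg = NXT_CMSG_HDR(cmsg);
    }
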
@@ -787,6 +807,11 @@ static size_t my_strnlen(const char *s, size_t maxlen)
#define INET_LOPT_LINE_DELIM 40 /* Line delimiting char */
#define INET_OPT_TCLASS 41 /* IPv6 transport class */
#define INET_OPT_BIND_TO_DEVICE 42 /* get/set network device the socket is bound to */
+#define INET_OPT_RECVTOS 43 /* IP_RECVTOS ancillary data */
+#define INET_OPT_RECVTCLASS 44 /* IPV6_RECVTCLASS ancillary data */
+#define INET_OPT_PKTOPTIONS 45 /* IP(V6)_PKTOPTIONS get ancillary data */
+#define INET_OPT_TTL 46 /* IP_TTL */
+#define INET_OPT_RECVTTL 47 /* IP_RECVTTL ancillary data */
/* SCTP options: a separate range, from 100: */
#define SCTP_OPT_RTOINFO 100
#define SCTP_OPT_ASSOCINFO 101
@@ -862,6 +887,11 @@ static size_t my_strnlen(const char *s, size_t maxlen)
#define SCTP_FLAG_SACDELAY_ENABLE (32 /* am_sackdelay_enable */)
#define SCTP_FLAG_SACDELAY_DISABLE (64 /* am_sackdelay_disable */)
+/* Flags for recv_cmsgflags */
+#define INET_CMSG_RECVTOS (1 << 0) /* am_recvtos, am_tos */
+#define INET_CMSG_RECVTCLASS (1 << 1) /* am_recvtclass, am_tclass */
+#define INET_CMSG_RECVTTL (1 << 2) /* am_recvttl, am_ttl */
+
/*
** End of interface constants.
**--------------------------------------------------------------------------*/
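
The recv_cmsgflags bits above pair a *_RECV* socket option with the ancillary item it enables. A hedged sketch of how an inet_set_opts() case for the new INET_OPT_RECVTOS might look, modeled on the INET_OPT_TOS case visible near the end of this excerpt; the actual option cases are outside the hunks shown here, so treat this as an assumption:

    case INET_OPT_RECVTOS:
    #if defined(IP_RECVTOS) && defined(IPPROTO_IP)
        proto = IPPROTO_IP;
        type  = IP_RECVTOS;
        recv_cmsgflags = ival ? (recv_cmsgflags | INET_CMSG_RECVTOS)
                              : (recv_cmsgflags & ~INET_CMSG_RECVTOS);
        break;
    #else
        continue;   /* option not supported on this platform: ignore it */
    #endif
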
@@ -916,8 +946,13 @@ static size_t my_strnlen(const char *s, size_t maxlen)
#ifdef HAVE_SCTP
#define PACKET_ERL_DRV_TERM_DATA_LEN 512
#else
+#ifndef __WIN32__
+/* Assume we have recvmsg() and might need room for ancillary data */
+#define PACKET_ERL_DRV_TERM_DATA_LEN 64
+#else
#define PACKET_ERL_DRV_TERM_DATA_LEN 32
#endif
+#endif
#define BIN_REALLOC_MARGIN(x) ((x)/4) /* 25% */
@@ -1084,6 +1119,7 @@ typedef struct {
char *netns; /* Socket network namespace name
as full file path */
#endif
+ int recv_cmsgflags; /* Which ancillary data to expect */
} inet_descriptor;
@@ -1336,6 +1372,11 @@ static ErlDrvTermData am_udp_error;
#ifdef HAVE_SYS_UN_H
static ErlDrvTermData am_local;
#endif
+#ifndef __WIN32__
+static ErlDrvTermData am_tos;
+static ErlDrvTermData am_tclass;
+static ErlDrvTermData am_ttl;
+#endif
#ifdef HAVE_SCTP
static ErlDrvTermData am_sctp;
static ErlDrvTermData am_sctp_passive;
@@ -1356,8 +1397,9 @@ static ErlDrvTermData am_sndbuf;
static ErlDrvTermData am_reuseaddr;
static ErlDrvTermData am_dontroute;
static ErlDrvTermData am_priority;
-static ErlDrvTermData am_tos;
-static ErlDrvTermData am_tclass;
+static ErlDrvTermData am_recvtos;
+static ErlDrvTermData am_recvtclass;
+static ErlDrvTermData am_recvttl;
static ErlDrvTermData am_ipv6_v6only;
static ErlDrvTermData am_netns;
static ErlDrvTermData am_bind_to_device;
@@ -1548,10 +1590,10 @@ static void *realloc_wrapper(void *current, ErlDrvSizeT size){
# define ASSOC_ID_LEN 4
# define LOAD_ASSOC_ID LOAD_UINT
# define LOAD_ASSOC_ID_CNT LOAD_UINT_CNT
-# define SCTP_ANC_BUFF_SIZE INET_DEF_BUFFER/2 /* XXX: not very good... */
#else
# define IS_SCTP(desc) 0
#endif
+# define ANC_BUFF_SIZE INET_DEF_BUFFER/2 /* XXX: not very good... */
#ifdef HAVE_UDP
static int load_address(ErlDrvTermData* spec, int i, char* buf)
@@ -2752,6 +2794,66 @@ static int inet_async_data(inet_descriptor* desc, const char* buf, int len)
}
}
+#ifndef __WIN32__
+static int load_cmsg_int(ErlDrvTermData *spec, int i,
+ struct cmsghdr *cmsg) {
+ union u {
+ byte uint8;
+ Uint16 uint16;
+ Uint32 uint32;
+ Uint64 uint64;
+ } *p;
+ p = (union u*) CMSG_DATA(cmsg);
+ switch (LEN_CMSG_DATA(cmsg) * CHAR_BIT) {
+ case 8:
+ return LOAD_INT(spec, i, p->uint8);
+ case 16:
+ return LOAD_INT(spec, i, p->uint16);
+
+ case 32:
+ return LOAD_INT(spec, i, p->uint32);
+
+ case 64:
+ return LOAD_INT(spec, i, p->uint64);
+ }
+ return LOAD_INT(spec, i, 0);
+}
+
+static int parse_ancillary_data_item(ErlDrvTermData *spec, int i,
+ struct cmsghdr *cmsg, int *n) {
+#define LOAD_CMSG_INT(proto, type, am) \
+ if (cmsg->cmsg_level == (proto) && \
+ cmsg->cmsg_type == (type)) { \
+ i = LOAD_ATOM(spec, i, (am)); \
+ i = load_cmsg_int(spec, i, cmsg); \
+ i = LOAD_TUPLE(spec, i, 2); \
+ (*n)++; \
+ return i; \
+ }
+#if defined(IPPROTO_IP) && defined(IP_TOS)
+ LOAD_CMSG_INT(IPPROTO_IP, IP_TOS, am_tos);
+#endif
+#if defined(IPPROTO_IPV6) && defined(IPV6_TCLASS)
+ LOAD_CMSG_INT(IPPROTO_IPV6, IPV6_TCLASS, am_tclass);
+#endif
+#if defined(IPPROTO_IP) && defined(IP_TTL)
+ LOAD_CMSG_INT(IPPROTO_IP, IP_TTL, am_ttl);
+#endif
+ /* BSD uses the RECV* names in CMSG fields */
+#if defined(IPPROTO_IP) && defined(IP_RECVTOS)
+ LOAD_CMSG_INT(IPPROTO_IP, IP_RECVTOS, am_tos);
+#endif
+#if defined(IPPROTO_IPV6) && defined(IPV6_RECVTCLASS)
+ LOAD_CMSG_INT(IPPROTO_IPV6, IPV6_RECVTCLASS, am_tclass);
+#endif
+#if defined(IPPROTO_IP) && defined(IP_RECVTTL)
+ LOAD_CMSG_INT(IPPROTO_IP, IP_RECVTTL, am_ttl);
+#endif
+#undef LOAD_CMSG_INT
+ return i;
+}
+#endif /* #ifndef __WIN32__ */
+
#ifdef HAVE_SCTP
/*
** SCTP-related atoms:
@@ -2883,11 +2985,18 @@ static int sctp_parse_ancillary_data
for (cmsg = frst_msg; cmsg != NULL; cmsg = CMSG_NXTHDR(mptr,cmsg))
{
struct sctp_sndrcvinfo * sri;
-
+#ifndef __WIN32
+ int old_s;
+
+ /* Parse ancillary data common to UDP */
+ old_s = s;
+ i = parse_ancillary_data_item(spec, i, cmsg, &s);
+ if (s > old_s) continue;
/* Skip other possible ancillary data, e.g. from IPv6: */
if (cmsg->cmsg_level != IPPROTO_SCTP ||
cmsg->cmsg_type != SCTP_SNDRCV)
continue;
+#endif
if (((char*)cmsg + cmsg->cmsg_len) - (char*)frst_msg >
mptr->msg_controllen)
@@ -3227,6 +3336,23 @@ static int sctp_parse_async_event
}
#endif /* HAVE_SCTP */
+#ifndef __WIN32__
+static int udp_parse_ancillary_data(ErlDrvTermData *spec, int i,
+ struct msghdr *mptr) {
+ struct cmsghdr *cmsg;
+ int n;
+
+ n = 0;
+ for (cmsg = CMSG_FIRSTHDR(mptr);
+ cmsg != NULL;
+ cmsg = CMSG_NXTHDR(mptr, cmsg)) {
+ i = parse_ancillary_data_item(spec, i, cmsg, &n);
+ }
+ i = LOAD_NIL(spec, i);
+ return LOAD_LIST(spec, i, n+1);
+}
+#endif /* ifndef __WIN32__ */
+
/*
** passive mode reply:
** for UDP:
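
Term-shape note for udp_parse_ancillary_data() above (editorial): each matching control message contributes one 2-tuple such as {tos, Value}, {tclass, Value} or {ttl, Value} via parse_ancillary_data_item(); the trailing LOAD_NIL plus LOAD_LIST(spec, i, n+1) then closes them into a proper list, the +1 accounting for the NIL tail. So the ancillary data reaches Erlang as, for example, [{tos,104},{ttl,64}] (values here are illustrative).
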
@@ -3249,7 +3375,7 @@ static int sctp_parse_async_event
static int
inet_async_binary_data
(inet_descriptor* desc, unsigned int phsz,
- ErlDrvBinary * bin, int offs, int len, void * extra)
+ ErlDrvBinary * bin, int offs, int len, void *mp)
{
unsigned int hsz = desc->hsz + phsz;
ErlDrvTermData spec [PACKET_ERL_DRV_TERM_DATA_LEN];
@@ -3282,9 +3408,10 @@ inet_async_binary_data
if (IS_SCTP(desc))
{ /* For SCTP we always have desc->hsz==0 (i.e., no application-level
headers are used), so hsz==phsz (see above): */
- struct msghdr* mptr;
int sz;
-
+ struct msghdr *mptr;
+
+ mptr = mp;
ASSERT (hsz == phsz && hsz != 0);
sz = len - hsz; /* Size of the msg data proper, w/o the addr */
@@ -3292,7 +3419,6 @@ inet_async_binary_data
i = LOAD_STRING(spec, i, bin->orig_bytes+offs, hsz);
/* Put in the list (possibly empty) of Ancillary Data: */
- mptr = (struct msghdr *) extra;
i = sctp_parse_ancillary_data (spec, i, mptr);
/* Then: Data or Event (Notification)? */
@@ -3321,20 +3447,32 @@ inet_async_binary_data
}
else
#endif /* HAVE_SCTP */
- /* Generic case. Both Addr and Data (or a single list of them together) are
- returned: */
+ {
+ /* Generic case. Both Addr and Data
+ * (or a single list of them together) are returned: */
- if ((desc->mode == INET_MODE_LIST) || (hsz > len)) {
- /* INET_MODE_LIST => [H1,H2,...Hn] */
- i = LOAD_STRING(spec, i, bin->orig_bytes+offs, len);
- }
- else {
- /* INET_MODE_BINARY => [H1,H2,...HSz | Binary] or [Binary]: */
- int sz = len - hsz;
- i = LOAD_BINARY(spec, i, bin, offs+hsz, sz);
- if (hsz > 0)
- i = LOAD_STRING_CONS(spec, i, bin->orig_bytes+offs, hsz);
+ if ((desc->mode == INET_MODE_LIST) || (hsz > len)) {
+ /* INET_MODE_LIST => [H1,H2,...Hn] */
+ i = LOAD_STRING(spec, i, bin->orig_bytes+offs, len);
+ }
+ else {
+ /* INET_MODE_BINARY => [H1,H2,...HSz | Binary] or [Binary]: */
+ int sz = len - hsz;
+ i = LOAD_BINARY(spec, i, bin, offs+hsz, sz);
+ if (hsz > 0)
+ i = LOAD_STRING_CONS(spec, i, bin->orig_bytes+offs, hsz);
+ }
+
+#ifndef __WIN32__
+ if (mp) {
+ /* We got ancillary data from an UDP recvmsg.
+ * Insert an additional tuple level {[F|AddrData],AncData} */
+ i = udp_parse_ancillary_data(spec, i, (struct msghdr*)mp);
+ i = LOAD_TUPLE(spec, i, 2);
+ }
+#endif
}
+
/* Close up the {ok, ...} or {error, ...} tuple: */
i = LOAD_TUPLE(spec, i, 2);
@@ -3466,8 +3604,9 @@ static int tcp_error_message(tcp_descriptor* desc, int err)
** [AddrLen, H2,...,HSz] are msg headers for UDP AF_UNIX only
** Data : List() | Binary()
*/
-static int packet_binary_message
- (inet_descriptor* desc, ErlDrvBinary* bin, int offs, int len, void* extra)
+static int packet_binary_message(inet_descriptor* desc,
+ ErlDrvBinary* bin, int offs, int len,
+ void *mp)
{
unsigned int hsz = desc->hsz;
ErlDrvTermData spec [PACKET_ERL_DRV_TERM_DATA_LEN];
@@ -3492,8 +3631,14 @@ static int packet_binary_message
# ifdef HAVE_SCTP
if (!IS_SCTP(desc))
- {
# endif
+ {
+#ifndef __WIN32__
+ if (mp) i = udp_parse_ancillary_data(spec, i, (struct msghdr*)mp);
+#endif
+ /* We got ancillary data from an UDP recvmsg.
+ * Insert an additional tuple level {AncData,[F|AddrData]}
+ */
if ((desc->mode == INET_MODE_LIST) || (hsz > len))
/* INET_MODE_LIST, or only headers => [H1,H2,...Hn] */
i = LOAD_STRING(spec, i, bin->orig_bytes+offs, len);
@@ -3505,16 +3650,24 @@ static int packet_binary_message
if (hsz > 0)
i = LOAD_STRING_CONS(spec, i, bin->orig_bytes+offs, hsz);
}
-# ifdef HAVE_SCTP
+ /* Close up the outer 5-or-6-tuple */
+#ifndef __WIN32__
+ if (mp) i = LOAD_TUPLE(spec, i, 6);
+ else
+#endif
+ i = LOAD_TUPLE(spec, i, 5);
}
+# ifdef HAVE_SCTP
else
- { /* For SCTP we always have desc->hsz==0 (i.e., no application-level
+ {
+ struct msghdr *mptr;
+
+ mptr = mp;
+ /* For SCTP we always have desc->hsz==0 (i.e., no application-level
headers are used): */
- struct msghdr* mptr;
ASSERT(hsz == 0);
/* Put in the list (possibly empty) of Ancillary Data: */
- mptr = (struct msghdr *) extra;
i = sctp_parse_ancillary_data (spec, i, mptr);
/* Then: Data or Event (Notification)? */
@@ -3540,11 +3693,11 @@ static int packet_binary_message
/* Close up the {[AncilData], Event_OR_Data} tuple: */
i = LOAD_TUPLE (spec, i, 2);
+ /* Close up the outer 5-tuple: */
+ i = LOAD_TUPLE(spec, i, 5);
}
# endif /* HAVE_SCTP */
- /* Close up the outer 5-tuple: */
- i = LOAD_TUPLE(spec, i, 5);
ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN);
return erl_drv_output_term(desc->dport, spec, i);
}
@@ -3678,19 +3831,19 @@ tcp_reply_binary_data(tcp_descriptor* desc, ErlDrvBinary* bin, int offs, int len
static int
packet_reply_binary_data(inet_descriptor* desc, unsigned int hsz,
ErlDrvBinary * bin, int offs, int len,
- void * extra)
+ void *mp)
{
int code;
if (desc->active == INET_PASSIVE)
/* "inet" is actually for both UDP and SCTP, as well as TCP! */
- return inet_async_binary_data(desc, hsz, bin, offs, len, extra);
+ return inet_async_binary_data(desc, hsz, bin, offs, len, mp);
else
{ /* INET_ACTIVE or INET_ONCE: */
if (desc->deliver == INET_DELIVER_PORT)
code = inet_port_binary_data(desc, bin, offs, len);
else
- code = packet_binary_message(desc, bin, offs, len, extra);
+ code = packet_binary_message(desc, bin, offs, len, mp);
if (code < 0)
return code;
INET_CHECK_ACTIVE_TO_PASSIVE(desc);
@@ -3757,8 +3910,9 @@ static void inet_init_sctp(void) {
INIT_ATOM(reuseaddr);
INIT_ATOM(dontroute);
INIT_ATOM(priority);
- INIT_ATOM(tos);
- INIT_ATOM(tclass);
+ INIT_ATOM(recvtos);
+ INIT_ATOM(recvtclass);
+ INIT_ATOM(recvttl);
INIT_ATOM(ipv6_v6only);
INIT_ATOM(netns);
INIT_ATOM(bind_to_device);
@@ -3901,6 +4055,11 @@ static int inet_init()
#endif
INIT_ATOM(empty_out_q);
INIT_ATOM(ssl_tls);
+#ifndef __WIN32__
+ INIT_ATOM(tos);
+ INIT_ATOM(tclass);
+ INIT_ATOM(ttl);
+#endif
INIT_ATOM(http_eoh);
INIT_ATOM(http_header);
@@ -5119,8 +5278,8 @@ static ErlDrvSSizeT inet_ctl_ifget(inet_descriptor* desc,
sys_memcpy(sptr,
sdlp->sdl_data + sdlp->sdl_nlen,
sdlp->sdl_alen);
- freeifaddrs(ifa);
sptr += sdlp->sdl_alen;
+ freeifaddrs(ifa);
#endif
break;
}
@@ -5933,7 +6092,8 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
but ditto for the other worked and that was actually the requested
option, failure was still reported to erlang. */
-#if defined(IP_TOS) && defined(SOL_IP) && defined(SO_PRIORITY)
+#if defined(IP_TOS) && defined(IPPROTO_IP) \
+ && defined(SO_PRIORITY) && !defined(__WIN32__)
static int setopt_prio_tos_trick
(int fd, int proto, int type, char* arg_ptr, int arg_sz, int propagate)
{
@@ -5955,14 +6115,14 @@ static int setopt_prio_tos_trick
res_prio = sock_getopt(fd, SOL_SOCKET, SO_PRIORITY,
(char *) &tmp_ival_prio, &tmp_arg_sz_prio);
- res_tos = sock_getopt(fd, SOL_IP, IP_TOS,
+ res_tos = sock_getopt(fd, IPPROTO_IP, IP_TOS,
(char *) &tmp_ival_tos, &tmp_arg_sz_tos);
res = sock_setopt(fd, proto, type, arg_ptr, arg_sz);
if (res == 0) {
if (type != SO_PRIORITY) {
if (type != IP_TOS && res_tos == 0) {
res_tos = sock_setopt(fd,
- SOL_IP,
+ IPPROTO_IP,
IP_TOS,
(char *) &tmp_ival_tos,
tmp_arg_sz_tos);
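The hunk above is easier to follow with the underlying problem spelled out: on Linux, changing IP_TOS can clobber SO_PRIORITY (and, depending on kernel version, the other way around), so the trick reads both values first and re-applies the one it did not mean to change. Below is a hedged, Linux-only sketch of that idea covering just the TOS-setting direction; the function name and error handling are illustrative, not the driver's.

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/ip.h>

    /* Set IP_TOS but keep whatever SO_PRIORITY was in effect before. */
    static int set_tos_keep_priority(int fd, int new_tos)
    {
        int prio = 0, res;
        socklen_t sz = sizeof(prio);

        if (getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &sz) < 0)
            prio = -1;                          /* nothing to restore */
        res = setsockopt(fd, IPPROTO_IP, IP_TOS, &new_tos, sizeof(new_tos));
        if (res == 0 && prio != -1)             /* best-effort restore */
            (void) setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
        return res;
    }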
@@ -6006,7 +6166,7 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
int proto;
int opt;
struct linger li_val;
-#ifdef HAVE_MULTICAST_SUPPORT
+#if defined(HAVE_MULTICAST_SUPPORT) && defined(IPPROTO_IP)
struct ip_mreq mreq_val;
#endif
int ival;
@@ -6029,6 +6189,8 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
/* XXX { int i; for(i=0;i<len;++i) fprintf(stderr,"0x%02X, ", (unsigned) ptr[i]); fprintf(stderr,"\r\n");} */
while(len >= 5) {
+ int recv_cmsgflags;
+
opt = *ptr++;
ival = get_int32(ptr);
ptr += 4;
@@ -6037,6 +6199,7 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
arg_sz = sizeof(ival);
proto = SOL_SOCKET;
propagate = 0;
+ recv_cmsgflags = desc->recv_cmsgflags;
switch(opt) {
case INET_LOPT_HEADER:
@@ -6287,28 +6450,80 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
(long)desc->port, desc->s, ival));
break;
#else
+ /* inet_fill_opts always returns a value for this option,
+ * so we need to ignore it if not implemented */
continue;
#endif
case INET_OPT_TOS:
-#if defined(IP_TOS) && defined(SOL_IP)
- proto = SOL_IP;
+#if defined(IP_TOS) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
type = IP_TOS;
propagate = 1;
DEBUGF(("inet_set_opts(%ld): s=%d, IP_TOS=%d\r\n",
(long)desc->port, desc->s, ival));
break;
#else
+ /* inet_fill_opts always returns a value for this option,
+ * so we need to ignore it if not implemented. */
continue;
#endif
-#if defined(IPV6_TCLASS) && defined(SOL_IPV6)
+#if defined(IPV6_TCLASS) && defined(IPPROTO_IPV6)
case INET_OPT_TCLASS:
- proto = SOL_IPV6;
+ proto = IPPROTO_IPV6;
type = IPV6_TCLASS;
propagate = 1;
DEBUGF(("inet_set_opts(%ld): s=%d, IPV6_TCLASS=%d\r\n",
(long)desc->port, desc->s, ival));
break;
#endif
+#if defined(IP_TTL) && defined(IPPROTO_IP)
+ case INET_OPT_TTL:
+ proto = IPPROTO_IP;
+ type = IP_TTL;
+ propagate = 1;
+ DEBUGF(("inet_set_opts(%ld): s=%d, IP_TTL=%d\r\n",
+ (long)desc->port, desc->s, ival));
+ break;
+#endif
+#if defined(IP_RECVTOS) && defined(IPPROTO_IP)
+ case INET_OPT_RECVTOS:
+ proto = IPPROTO_IP;
+ type = IP_RECVTOS;
+ propagate = 1;
+ recv_cmsgflags =
+ ival ?
+ (desc->recv_cmsgflags | INET_CMSG_RECVTOS) :
+ (desc->recv_cmsgflags & ~INET_CMSG_RECVTOS);
+ DEBUGF(("inet_set_opts(%ld): s=%d, IP_RECVTOS=%d\r\n",
+ (long)desc->port, desc->s, ival));
+ break;
+#endif
+#if defined(IPV6_RECVTCLASS) && defined(IPPROTO_IPV6)
+ case INET_OPT_RECVTCLASS:
+ proto = IPPROTO_IPV6;
+ type = IPV6_RECVTCLASS;
+ propagate = 1;
+ recv_cmsgflags =
+ ival ?
+ (desc->recv_cmsgflags | INET_CMSG_RECVTCLASS) :
+ (desc->recv_cmsgflags & ~INET_CMSG_RECVTCLASS);
+ DEBUGF(("inet_set_opts(%ld): s=%d, IPV6_RECVTCLASS=%d\r\n",
+ (long)desc->port, desc->s, ival));
+ break;
+#endif
+#if defined(IP_RECVTTL) && defined(IPPROTO_IP)
+ case INET_OPT_RECVTTL:
+ proto = IPPROTO_IP;
+ type = IP_RECVTTL;
+ propagate = 1;
+ recv_cmsgflags =
+ ival ?
+ (desc->recv_cmsgflags | INET_CMSG_RECVTTL) :
+ (desc->recv_cmsgflags & ~INET_CMSG_RECVTTL);
+ DEBUGF(("inet_set_opts(%ld): s=%d, IP_RECVTTL=%d\r\n",
+ (long)desc->port, desc->s, ival));
+ break;
+#endif
case TCP_OPT_NODELAY:
proto = IPPROTO_TCP;
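The new INET_OPT_RECV* cases all follow the same shape: pick the socket-level option, and record the request in a bitmask only once setsockopt() has succeeded, so the receive path knows whether recvmsg() is worth the trouble. A small, hypothetical stand-alone version of that pattern (MY_CMSG_RECVTOS and the function name are stand-ins, not driver symbols):

    #include <sys/socket.h>
    #include <netinet/in.h>

    #define MY_CMSG_RECVTOS 0x01            /* stand-in for INET_CMSG_RECVTOS */

    static int enable_recvtos(int fd, int on, int *cmsgflags)
    {
        int res = setsockopt(fd, IPPROTO_IP, IP_RECVTOS, &on, sizeof(on));
        if (res == 0)                       /* only remember it on success */
            *cmsgflags = on ? (*cmsgflags |  MY_CMSG_RECVTOS)
                            : (*cmsgflags & ~MY_CMSG_RECVTOS);
        return res;
    }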
@@ -6317,7 +6532,7 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
(long)desc->port, desc->s, ival));
break;
-#ifdef HAVE_MULTICAST_SUPPORT
+#if defined(HAVE_MULTICAST_SUPPORT) && defined(IPPROTO_IP)
case UDP_OPT_MULTICAST_TTL:
proto = IPPROTO_IP;
@@ -6363,10 +6578,10 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
arg_sz = sizeof(mreq_val);
break;
-#endif /* HAVE_MULTICAST_SUPPORT */
+#endif /* defined(HAVE_MULTICAST_SUPPORT) && defined(IPPROTO_IP) */
case INET_OPT_IPV6_V6ONLY:
-#if HAVE_DECL_IPV6_V6ONLY
+#if HAVE_DECL_IPV6_V6ONLY && defined(IPPROTO_IPV6)
proto = IPPROTO_IPV6;
type = IPV6_V6ONLY;
propagate = 1;
@@ -6426,11 +6641,13 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
default:
return -1;
}
-#if defined(IP_TOS) && defined(SOL_IP) && defined(SO_PRIORITY)
+#if defined(IP_TOS) && defined(IPPROTO_IP) \
+ && defined(SO_PRIORITY) && !defined(__WIN32__)
res = setopt_prio_tos_trick (desc->s, proto, type, arg_ptr, arg_sz, propagate);
#else
res = sock_setopt (desc->s, proto, type, arg_ptr, arg_sz);
#endif
+ if (res == 0) desc->recv_cmsgflags = recv_cmsgflags;
if (propagate && res != 0) {
return -1;
}
@@ -6572,10 +6789,14 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
while (curr < ptr + len)
{
+ int recv_cmsgflags;
/* Get the Erlang-encoded option type -- always 1 byte: */
- int eopt = *curr;
+ int eopt;
+
+ eopt = *curr;
curr++;
+ recv_cmsgflags = desc->recv_cmsgflags;
/* Get the option value. XXX: The condition (curr < ptr + len)
does not preclude us from reading from beyond the buffer end,
if the Erlang part of the driver specifies its input wrongly!
@@ -6756,28 +6977,32 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
break;
}
# else
- continue; /* Option not supported -- ignore it */
+ /* inet_fill_opts always returns a value for this option,
+ * so we need to ignore it if not implemented, just in case */
+ continue;
# endif
case INET_OPT_TOS:
-# if defined(IP_TOS) && defined(SOL_IP)
+# if defined(IP_TOS) && defined(IPPROTO_IP)
{
arg.ival= get_int32 (curr); curr += 4;
- proto = SOL_IP;
+ proto = IPPROTO_IP;
type = IP_TOS;
arg_ptr = (char*) (&arg.ival);
arg_sz = sizeof ( arg.ival);
break;
}
# else
- continue; /* Option not supported -- ignore it */
+ /* inet_fill_opts always returns a value for this option,
+ * so we need to ignore it if not implemented, just in case */
+ continue;
# endif
-# if defined(IPV6_TCLASS) && defined(SOL_IPV6)
+# if defined(IPV6_TCLASS) && defined(IPPROTO_IPV6)
case INET_OPT_TCLASS:
{
arg.ival= get_int32 (curr); curr += 4;
- proto = SOL_IPV6;
+ proto = IPPROTO_IPV6;
type = IPV6_TCLASS;
arg_ptr = (char*) (&arg.ival);
arg_sz = sizeof ( arg.ival);
@@ -6785,9 +7010,69 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
}
# endif
+# if defined(IP_TTL) && defined(IPPROTO_IP)
+ case INET_OPT_TTL:
+ {
+ arg.ival= get_int32 (curr); curr += 4;
+ proto = IPPROTO_IP;
+ type = IP_TTL;
+ arg_ptr = (char*) (&arg.ival);
+ arg_sz = sizeof ( arg.ival);
+ break;
+ }
+# endif
+
+# if defined(IP_RECVTOS) && defined(IPPROTO_IP)
+ case INET_OPT_RECVTOS:
+ {
+ arg.ival= get_int32 (curr); curr += 4;
+ proto = IPPROTO_IP;
+ type = IP_RECVTOS;
+ arg_ptr = (char*) (&arg.ival);
+ arg_sz = sizeof ( arg.ival);
+ recv_cmsgflags =
+ arg.ival ?
+ (desc->recv_cmsgflags | INET_CMSG_RECVTOS) :
+ (desc->recv_cmsgflags & ~INET_CMSG_RECVTOS);
+ break;
+ }
+# endif
+
+# if defined(IPV6_RECVTCLASS) && defined(IPPROTO_IPV6)
+ case INET_OPT_RECVTCLASS:
+ {
+ arg.ival= get_int32 (curr); curr += 4;
+ proto = IPPROTO_IPV6;
+ type = IPV6_RECVTCLASS;
+ arg_ptr = (char*) (&arg.ival);
+ arg_sz = sizeof ( arg.ival);
+ recv_cmsgflags =
+ arg.ival ?
+ (desc->recv_cmsgflags | INET_CMSG_RECVTCLASS) :
+ (desc->recv_cmsgflags & ~INET_CMSG_RECVTCLASS);
+ break;
+ }
+# endif
+
+# if defined(IP_RECVTTL) && defined(IPPROTO_IP)
+ case INET_OPT_RECVTTL:
+ {
+ arg.ival= get_int32 (curr); curr += 4;
+ proto = IPPROTO_IP;
+ type = IP_RECVTTL;
+ arg_ptr = (char*) (&arg.ival);
+ arg_sz = sizeof ( arg.ival);
+ recv_cmsgflags =
+ arg.ival ?
+ (desc->recv_cmsgflags | INET_CMSG_RECVTTL) :
+ (desc->recv_cmsgflags & ~INET_CMSG_RECVTTL);
+ break;
+ }
+# endif
+
case INET_OPT_IPV6_V6ONLY:
-# if HAVE_DECL_IPV6_V6ONLY
+# if HAVE_DECL_IPV6_V6ONLY && defined(IPPROTO_IPV6)
{
arg.ival= get_int32 (curr); curr += 4;
proto = IPPROTO_IPV6;
@@ -7037,13 +7322,15 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
*/
return -1;
}
-#if defined(IP_TOS) && defined(SOL_IP) && defined(SO_PRIORITY)
+#if defined(IP_TOS) && defined(IPPROTO_IP) \
+ && defined(SO_PRIORITY) && !defined(__WIN32__)
res = setopt_prio_tos_trick (desc->s, proto, type, arg_ptr, arg_sz, 1);
#else
res = sock_setopt (desc->s, proto, type, arg_ptr, arg_sz);
#endif
/* The return values of "sock_setopt" can only be 0 or -1: */
ASSERT(res == 0 || res == -1);
+ if (res == 0) desc->recv_cmsgflags = recv_cmsgflags;
if (res == -1)
{ /* Got an error, DO NOT continue with other options. However, on
Solaris 10, we DO allow SO_SNDBUF and SO_RCVBUF to fail, assu-
@@ -7064,6 +7351,35 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
}
#endif /* HAVE_SCTP */
+#ifndef __WIN32__
+static void put_cmsg_int32(struct cmsghdr *cmsg, char *ptr) {
+ union u {
+ byte uint8;
+ Uint16 uint16;
+ Uint32 uint32;
+ Uint64 uint64;
+ } *p;
+ p = (union u*) CMSG_DATA(cmsg);
+ switch (LEN_CMSG_DATA(cmsg) * CHAR_BIT) {
+ case 8:
+ put_int32((Uint32) p->uint8, ptr);
+ break;
+ case 16:
+ put_int32((Uint32) p->uint16, ptr);
+ break;
+ case 32:
+ put_int32(p->uint32, ptr);
+ break;
+ case 64:
+ put_int32((Uint32) p->uint64, ptr);
+ break;
+ default:
+ put_int32(0, ptr);
+ }
+ return;
+}
+#endif
+
/* load all option values into the buf and reply
** return total length of reply filled into ptr
** ptr should point to a buffer with 9*len +1 to be safe!!
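put_cmsg_int32() above exists because the width of a control-message payload is platform dependent: IP_TTL arrives as an int on Linux but can be a single byte elsewhere, so the driver normalises whatever width it finds to a 32-bit reply field. A stripped-down version of the same idea, using standard types instead of the driver's Uint/put_int32 helpers:

    #include <stdint.h>
    #include <string.h>

    /* Normalise a 1/2/4/8-byte control-message payload to a 32-bit value. */
    static uint32_t cmsg_payload_to_u32(const void *data, size_t len)
    {
        uint8_t u8; uint16_t u16; uint32_t u32; uint64_t u64;
        switch (len) {
        case 1: memcpy(&u8,  data, 1); return u8;
        case 2: memcpy(&u16, data, 2); return u16;
        case 4: memcpy(&u32, data, 4); return u32;
        case 8: memcpy(&u64, data, 8); return (uint32_t) u64;
        default: return 0;                  /* unknown width */
        }
    }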
@@ -7298,8 +7614,8 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
continue;
#endif
case INET_OPT_TOS:
-#if defined(IP_TOS) && defined(SOL_IP)
- proto = SOL_IP;
+#if defined(IP_TOS) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
type = IP_TOS;
break;
#else
@@ -7308,14 +7624,50 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
continue;
#endif
case INET_OPT_TCLASS:
-#if defined(IPV6_TCLASS) && defined(SOL_IPV6)
- proto = SOL_IPV6;
+#if defined(IPV6_TCLASS) && defined(IPPROTO_IPV6)
+ proto = IPPROTO_IPV6;
type = IPV6_TCLASS;
break;
#else
TRUNCATE_TO(0,ptr);
continue;
#endif
+ case INET_OPT_TTL:
+#if defined(IP_TTL) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
+ type = IP_TTL;
+ break;
+#else
+ TRUNCATE_TO(0,ptr);
+ continue;
+#endif
+ case INET_OPT_RECVTOS:
+#if defined(IP_RECVTOS) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
+ type = IP_RECVTOS;
+ break;
+#else
+ TRUNCATE_TO(0,ptr);
+ continue;
+#endif
+ case INET_OPT_RECVTCLASS:
+#if defined(IPV6_RECVTCLASS) && defined(IPPROTO_IPV6)
+ proto = IPPROTO_IPV6;
+ type = IPV6_RECVTCLASS;
+ break;
+#else
+ TRUNCATE_TO(0,ptr);
+ continue;
+#endif
+ case INET_OPT_RECVTTL:
+#if defined(IP_RECVTTL) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
+ type = IP_RECVTTL;
+ break;
+#else
+ TRUNCATE_TO(0,ptr);
+ continue;
+#endif
case INET_OPT_REUSEADDR:
type = SO_REUSEADDR;
break;
@@ -7342,7 +7694,7 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
type = TCP_NODELAY;
break;
-#ifdef HAVE_MULTICAST_SUPPORT
+#if defined(HAVE_MULTICAST_SUPPORT) && defined(IPPROTO_IP)
case UDP_OPT_MULTICAST_TTL:
proto = IPPROTO_IP;
type = IP_MULTICAST_TTL;
@@ -7361,10 +7713,10 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
arg_ptr = (char*) &li_val;
type = SO_LINGER;
break;
-#endif /* HAVE_MULTICAST_SUPPORT */
+#endif /* defined(HAVE_MULTICAST_SUPPORT) && defined(IPPROTO_IP) */
case INET_OPT_IPV6_V6ONLY:
-#if HAVE_DECL_IPV6_V6ONLY
+#if HAVE_DECL_IPV6_V6ONLY && defined(IPPROTO_IPV6)
proto = IPPROTO_IPV6;
type = IPV6_V6ONLY;
break;
@@ -7442,6 +7794,94 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
continue;
#endif
+#ifndef __WIN32__
+ /* Winsock does not have struct cmsghdr */
+ case INET_OPT_PKTOPTIONS: {
+ struct cmsghdr *cmsg, *cmsg_top;
+ SOCKLEN_T cmsg_sz;
+ union {
+ /* Ensure alignment */
+ struct cmsghdr hdr;
+ /* Room for (IP_TOS | IPV6_TCLASS) + IP_TTL */
+ char buf[2*CMSG_SPACE(sizeof(int))];
+ } cmsgbuf;
+ /* Select between IPv4 or IPv6 PKTOPTIONS
+ * depending on the socket protocol family
+ */
+ switch (desc->sfamily) {
+#if defined(IPPROTO_IP) && defined(IP_PKTOPTIONS)
+ case AF_INET: {
+ proto = IPPROTO_IP;
+ type = IP_PKTOPTIONS;
+ }
+ break;
+#endif
+#if defined(IPPROTO_IPV6) && defined(IPV6_PKTOPTIONS) && defined(AF_INET6)
+ case AF_INET6: {
+ proto = IPPROTO_IPV6;
+ type = IPV6_PKTOPTIONS;
+ }
+ break;
+#endif
+ default: {
+ RETURN_ERROR();
+ }
+ } /* switch */
+ TRUNCATE_TO(0, ptr);
+ /* Fetch a cmsg buffer from the socket */
+ cmsg_sz = sizeof(cmsgbuf.buf);
+ if (IS_SOCKET_ERROR(sock_getopt(desc->s, proto, type,
+ cmsgbuf.buf, &cmsg_sz))) {
+ continue;
+ }
+ /* Reply with Opt/8, Length/32, [COpt/8, Value/32]*
+ * i.e. opt, total length and then all returned
+ * cmsg options and values
+ */
+ PLACE_FOR(1+4, ptr);
+ *ptr = opt;
+ arg_ptr = ptr+1; /* Where to put total length */
+ arg_sz = 0; /* Total length */
+ for (cmsg_top = (struct cmsghdr*)(cmsgbuf.buf + cmsg_sz),
+ cmsg = (struct cmsghdr*)cmsgbuf.buf;
+ cmsg < cmsg_top;
+ cmsg = NXT_CMSG_HDR(cmsg)) {
+#define PUT_CMSG_INT32(CMSG_LEVEL, CMSG_TYPE, OPT) \
+ if ((cmsg->cmsg_level == CMSG_LEVEL) && \
+ (cmsg->cmsg_type == CMSG_TYPE)) { \
+ PLACE_FOR(1+4, ptr); \
+ *ptr++ = OPT; \
+ put_cmsg_int32(cmsg, ptr); \
+ ptr += 4; \
+ arg_sz += 1+4; \
+ continue; \
+ }
+#if defined(IPPROTO_IP) && defined(IP_TOS)
+ PUT_CMSG_INT32(IPPROTO_IP, IP_TOS, INET_OPT_TOS);
+#endif
+#if defined(IPPROTO_IPV6) && defined(IPV6_TCLASS)
+ PUT_CMSG_INT32(IPPROTO_IPV6, IPV6_TCLASS, INET_OPT_TCLASS);
+#endif
+#if defined(IPPROTO_IP) && defined(IP_TTL)
+ PUT_CMSG_INT32(IPPROTO_IP, IP_TTL, INET_OPT_TTL);
+#endif
+ /* BSD uses the RECV* names in CMSG fields */
+ }
+#if defined(IPPROTO_IP) && defined(IP_RECVTOS)
+ PUT_CMSG_INT32(IPPROTO_IP, IP_RECVTOS, INET_OPT_TOS);
+#endif
+#if defined(IPPROTO_IPV6) && defined(IPV6_RECVTCLASS)
+ PUT_CMSG_INT32(IPPROTO_IPV6, IPV6_RECVTCLASS, INET_OPT_TCLASS);
+#endif
+#if defined(IPPROTO_IP) && defined(IP_RECVTTL)
+ PUT_CMSG_INT32(IPPROTO_IP, IP_RECVTTL, INET_OPT_TTL);
+#endif
+#undef PUT_CMSG_INT32
+ put_int32(arg_sz, arg_ptr); /* Put total length */
+ continue;
+ }
+#endif /* #ifndef __WIN32__ */
+
default:
RETURN_ERROR();
}
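The INET_OPT_PKTOPTIONS reply built above is a flat byte sequence: one option byte, a 32-bit total length, then repeated pairs of a sub-option byte and a 32-bit value, with integers in the big-endian encoding the driver protocol uses elsewhere. A hedged sketch of a decoder for that layout; get_u32 and dump_pktoptions are illustrative names, not part of the driver.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_u32(const unsigned char *p)
    {
        return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
             | ((uint32_t) p[2] << 8)  |  (uint32_t) p[3];
    }

    static void dump_pktoptions(const unsigned char *buf)
    {
        unsigned opt = buf[0];                  /* outer option byte */
        uint32_t total = get_u32(buf + 1);      /* total payload length */
        const unsigned char *p = buf + 5, *end = p + total;

        printf("outer option %u, %u payload bytes\n", opt, (unsigned) total);
        while (p + 5 <= end) {                  /* COpt/8, Value/32 pairs */
            printf("  copt %u = %u\n", p[0], (unsigned) get_u32(p + 1));
            p += 5;
        }
    }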
@@ -7501,6 +7941,7 @@ static int load_paddrinfo (ErlDrvTermData * spec, int i,
return i;
}
+
/*
** "sctp_fill_opts": Returns {ok, Results}, or an error:
*/
@@ -7750,6 +8191,7 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
case INET_OPT_PRIORITY :
case INET_OPT_TOS :
case INET_OPT_TCLASS :
+ case INET_OPT_TTL :
case INET_OPT_IPV6_V6ONLY:
case SCTP_OPT_AUTOCLOSE:
case SCTP_OPT_MAXSEG :
@@ -7757,6 +8199,9 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
case SCTP_OPT_NODELAY :
case SCTP_OPT_DISABLE_FRAGMENTS:
case SCTP_OPT_I_WANT_MAPPED_V4_ADDR:
+ case INET_OPT_RECVTOS :
+ case INET_OPT_RECVTCLASS :
+ case INET_OPT_RECVTTL :
{
int res = 0;
unsigned int sz = sizeof(res);
@@ -7812,8 +8257,8 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
}
case INET_OPT_TOS:
{
-# if defined(IP_TOS) && defined(SOL_IP)
- proto = SOL_IP;
+# if defined(IP_TOS) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
type = IP_TOS;
is_int = 1;
tag = am_tos;
@@ -7825,8 +8270,8 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
}
case INET_OPT_TCLASS:
{
-# if defined(IPV6_TCLASS) && defined(SOL_IPV6)
- proto = SOL_IPV6;
+# if defined(IPV6_TCLASS) && defined(IPPROTO_IPV6)
+ proto = IPPROTO_IPV6;
type = IPV6_TCLASS;
is_int = 1;
tag = am_tclass;
@@ -7836,8 +8281,60 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
continue;
# endif
}
+ case INET_OPT_TTL:
+ {
+# if defined(IP_TTL) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
+ type = IP_TTL;
+ is_int = 1;
+ tag = am_ttl;
+ break;
+# else
+ /* Not supported -- ignore */
+ continue;
+# endif
+ }
+ case INET_OPT_RECVTOS:
+ {
+# if defined(IP_RECVTOS) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
+ type = IP_RECVTOS;
+ is_int = 0;
+ tag = am_recvtos;
+ break;
+# else
+ /* Not supported -- ignore */
+ continue;
+# endif
+ }
+ case INET_OPT_RECVTCLASS:
+ {
+# if defined(IPV6_RECVTCLASS) && defined(IPPROTO_IPV6)
+ proto = IPPROTO_IPV6;
+ type = IPV6_RECVTCLASS;
+ is_int = 0;
+ tag = am_recvtclass;
+ break;
+# else
+ /* Not supported -- ignore */
+ continue;
+# endif
+ }
+ case INET_OPT_RECVTTL:
+ {
+# if defined(IP_RECVTTL) && defined(IPPROTO_IP)
+ proto = IPPROTO_IP;
+ type = IP_RECVTTL;
+ is_int = 0;
+ tag = am_recvttl;
+ break;
+# else
+ /* Not supported -- ignore */
+ continue;
+# endif
+ }
case INET_OPT_IPV6_V6ONLY:
-# if HAVE_DECL_IPV6_V6ONLY
+# if HAVE_DECL_IPV6_V6ONLY && defined(IPPROTO_IPV6)
{
proto = IPPROTO_IPV6;
type = IPV6_V6ONLY;
@@ -8502,6 +8999,8 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
desc->netns = NULL;
#endif
+ desc->recv_cmsgflags = 0;
+
return (ErlDrvData)desc;
}
@@ -11920,10 +12419,10 @@ static void packet_inet_command(ErlDrvData e, char* buf, ErlDrvSizeT len)
if (IS_SCTP(desc))
{
ErlDrvSizeT data_len;
- struct iovec iov[1]; /* For real data */
- struct msghdr mhdr; /* Message wrapper */
- struct sctp_sndrcvinfo *sri; /* The actual ancilary data */
- union { /* For ancilary data */
+ struct iovec iov[1]; /* For real data */
+ struct msghdr mhdr; /* Message wrapper */
+ struct sctp_sndrcvinfo *sri; /* The actual ancillary data */
+ union { /* For ancillary data */
struct cmsghdr hdr;
char ancd[CMSG_SPACE(sizeof(*sri))];
} cmsg;
@@ -11933,12 +12432,12 @@ static void packet_inet_command(ErlDrvData e, char* buf, ErlDrvSizeT len)
return;
}
- /* The ancilary data */
+ /* The ancillary data */
sri = (struct sctp_sndrcvinfo *) (CMSG_DATA(&cmsg.hdr));
/* Get the "sndrcvinfo" from the buffer, advancing the "ptr": */
ptr = sctp_get_sendparams(sri, ptr);
- /* The ancilary data wrapper */
+ /* The ancillary data wrapper */
cmsg.hdr.cmsg_level = IPPROTO_SCTP;
cmsg.hdr.cmsg_type = SCTP_SNDRCV;
cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(*sri));
@@ -11953,7 +12452,7 @@ static void packet_inet_command(ErlDrvData e, char* buf, ErlDrvSizeT len)
iov[0].iov_base = ptr; /* The real data */
mhdr.msg_iov = iov;
mhdr.msg_iovlen = 1;
- mhdr.msg_control = cmsg.ancd; /* For ancilary data */
+ mhdr.msg_control = cmsg.ancd; /* For ancillary data */
mhdr.msg_controllen = cmsg.hdr.cmsg_len;
VALGRIND_MAKE_MEM_DEFINED(mhdr.msg_control, mhdr.msg_controllen); /*suppress "uninitialised bytes"*/
mhdr.msg_flags = 0; /* Not used with "sendmsg" */
@@ -12037,10 +12536,12 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
char abuf[sizeof(inet_address)]; /* buffer address; enough??? */
int packet_count = udesc->read_packets;
int count = 0; /* number of packets delivered to owner */
-#ifdef HAVE_SCTP
+#ifndef __WIN32__
struct msghdr mhdr; /* Top-level msg structure */
struct iovec iov[1]; /* Data or Notification Event */
- char ancd[SCTP_ANC_BUFF_SIZE]; /* Ancillary Data */
+ char ancd[ANC_BUFF_SIZE]; /* Ancillary Data */
+#endif
+#ifdef HAVE_SCTP
int short_recv = 0;
#endif
@@ -12089,7 +12590,7 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
mhdr.msg_iov = iov;
mhdr.msg_iovlen = 1;
mhdr.msg_control = ancd;
- mhdr.msg_controllen = SCTP_ANC_BUFF_SIZE;
+ mhdr.msg_controllen = ANC_BUFF_SIZE;
mhdr.msg_flags = 0; /* To be filled by "recvmsg" */
/* Do the actual SCTP receive: */
@@ -12104,6 +12605,24 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
other = desc->remote;
goto check_result;
}
+#ifndef __WIN32__
+ /* recvmsg() does not exist in the Winsock API */
+ if (desc->recv_cmsgflags) {
+ /* Use recvmsg() */
+ iov->iov_base = udesc->i_ptr;
+ iov->iov_len = desc->bufsz;
+ mhdr.msg_name = &other;
+ mhdr.msg_namelen = len;
+ mhdr.msg_iov = iov;
+ mhdr.msg_iovlen = 1;
+ mhdr.msg_control = ancd;
+ mhdr.msg_controllen = ANC_BUFF_SIZE;
+ mhdr.msg_flags = 0;
+ n = sock_recvmsg(desc->s, &mhdr, 0);
+ len = mhdr.msg_namelen;
+ goto check_result;
+ }
+#endif
n = sock_recvfrom(desc->s, udesc->i_ptr, desc->bufsz,
0, &other.sa, &len);
check_result:
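When any INET_CMSG_* bit is set, the branch above swaps recvfrom() for recvmsg() so the kernel has somewhere to put the requested ancillary data. A self-contained sketch of that setup, with every buffer owned by the caller so the control messages can be walked afterwards with CMSG_FIRSTHDR(); function and parameter names are illustrative only.

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static ssize_t udp_recv_with_cmsg(int fd, struct msghdr *mhdr,
                                      struct iovec *iov,
                                      void *data, size_t data_len,
                                      struct sockaddr_storage *from,
                                      char *ctrl, size_t ctrl_len)
    {
        iov->iov_base        = data;
        iov->iov_len         = data_len;
        mhdr->msg_name       = from;
        mhdr->msg_namelen    = sizeof(*from);
        mhdr->msg_iov        = iov;
        mhdr->msg_iovlen     = 1;
        mhdr->msg_control    = ctrl;
        mhdr->msg_controllen = ctrl_len;
        mhdr->msg_flags      = 0;
        return recvmsg(fd, mhdr, 0);  /* msg_namelen/msg_controllen updated */
    }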
@@ -12156,7 +12675,7 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
{
/* message received */
int code;
- void * extra = NULL;
+ void *mp;
char * ptr;
int nsz;
@@ -12187,14 +12706,18 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
udesc->i_ptr = NULL; /* not used from here */
}
}
+ mp = NULL;
#ifdef HAVE_SCTP
- if (IS_SCTP(desc)) extra = &mhdr;
+ if (IS_SCTP(desc)) mp = &mhdr;
+#endif
+#ifndef __WIN32__
+ if (desc->recv_cmsgflags) mp = &mhdr;
#endif
/* Actual parsing and return of the data received, occur here: */
code = packet_reply_binary_data(desc, len, udesc->i_buf,
(sizeof(other) - len),
nsz,
- extra);
+ mp);
free_buffer(udesc->i_buf);
udesc->i_buf = NULL;
if (code < 0)
diff --git a/erts/emulator/pcre/LICENCE b/erts/emulator/pcre/LICENCE
new file mode 100644
index 0000000000..f6ef7fd766
--- /dev/null
+++ b/erts/emulator/pcre/LICENCE
@@ -0,0 +1,93 @@
+PCRE LICENCE
+------------
+
+PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+specified below. The documentation for PCRE, supplied in the "doc"
+directory, is distributed under the same terms as the software itself. The data
+in the testdata directory is not copyrighted and is in the public domain.
+
+The basic library functions are written in C and are freestanding. Also
+included in the distribution is a set of C++ wrapper functions, and a
+just-in-time compiler that can be used to optimize pattern matching. These
+are both optional features that can be omitted when the library is built.
+
+
+THE BASIC LIBRARY FUNCTIONS
+---------------------------
+
+Written by: Philip Hazel
+Email local part: ph10
+Email domain: cam.ac.uk
+
+University of Cambridge Computing Service,
+Cambridge, England.
+
+Copyright (c) 1997-2018 University of Cambridge
+All rights reserved.
+
+
+PCRE JUST-IN-TIME COMPILATION SUPPORT
+-------------------------------------
+
+Written by: Zoltan Herczeg
+Email local part: hzmester
+Email domain: freemail.hu
+
+Copyright(c) 2010-2018 Zoltan Herczeg
+All rights reserved.
+
+
+STACK-LESS JUST-IN-TIME COMPILER
+--------------------------------
+
+Written by: Zoltan Herczeg
+Email local part: hzmester
+Email domain: freemail.hu
+
+Copyright(c) 2009-2018 Zoltan Herczeg
+All rights reserved.
+
+
+THE C++ WRAPPER FUNCTIONS
+-------------------------
+
+Contributed by: Google Inc.
+
+Copyright (c) 2007-2012, Google Inc.
+All rights reserved.
+
+
+THE "BSD" LICENCE
+-----------------
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Google
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+End
diff --git a/erts/emulator/pcre/README.pcre_update.md b/erts/emulator/pcre/README.pcre_update.md
index 599e3d0d12..5df1e15bde 100644
--- a/erts/emulator/pcre/README.pcre_update.md
+++ b/erts/emulator/pcre/README.pcre_update.md
@@ -723,6 +723,12 @@ requires thorough reading of all new text. For the upgrade from 7.6
to 8.33, the update of the pcrepattern part of our manual page took
about eight hours.
+## Update Licence
+
+Copy the LICENCE file to `erts/emulator/pcre/LICENCE` and update
+the `[PCRE]` section in `system/COPYRIGHT` with the content of
+the `LICENCE` file.
+
## Add new relevant options to re
Then, when all this is done, you should add any new relevant options
diff --git a/erts/emulator/pcre/local_config.h b/erts/emulator/pcre/local_config.h
index c6af423d72..c3b4dab586 100644
--- a/erts/emulator/pcre/local_config.h
+++ b/erts/emulator/pcre/local_config.h
@@ -86,4 +86,4 @@
#define SUPPORT_UTF
/* Version number of package */
-#define VERSION "8.41"
+#define VERSION "8.42"
diff --git a/erts/emulator/pcre/pcre-8.41.tar.bz2 b/erts/emulator/pcre/pcre-8.41.tar.bz2
deleted file mode 100644
index 1798432dc9..0000000000
--- a/erts/emulator/pcre/pcre-8.41.tar.bz2
+++ /dev/null
Binary files differ
diff --git a/erts/emulator/pcre/pcre-8.42.tar.bz2 b/erts/emulator/pcre/pcre-8.42.tar.bz2
new file mode 100644
index 0000000000..61bfa38970
--- /dev/null
+++ b/erts/emulator/pcre/pcre-8.42.tar.bz2
Binary files differ
diff --git a/erts/emulator/pcre/pcre.h b/erts/emulator/pcre/pcre.h
index ab8f40cfc1..3563791223 100644
--- a/erts/emulator/pcre/pcre.h
+++ b/erts/emulator/pcre/pcre.h
@@ -43,9 +43,9 @@ POSSIBILITY OF SUCH DAMAGE.
/* The current PCRE version information. */
#define PCRE_MAJOR 8
-#define PCRE_MINOR 41
+#define PCRE_MINOR 42
#define PCRE_PRERELEASE
-#define PCRE_DATE 2017-07-05
+#define PCRE_DATE 2018-03-20
/* When an application links to a PCRE DLL in Windows, the symbols that are
imported have to be identified as such. When building PCRE, the appropriate
@@ -328,11 +328,11 @@ these bits, just add new ones on the end, in order to remain compatible. */
/* Types */
-struct real_pcre; /* declaration; the definition is private */
-typedef struct real_pcre pcre;
+struct real_pcre8_or_16; /* declaration; the definition is private */
+typedef struct real_pcre8_or_16 pcre;
-struct real_pcre16; /* declaration; the definition is private */
-typedef struct real_pcre16 pcre16;
+struct real_pcre8_or_16; /* declaration; the definition is private */
+typedef struct real_pcre8_or_16 pcre16;
struct real_pcre32; /* declaration; the definition is private */
typedef struct real_pcre32 pcre32;
diff --git a/erts/emulator/pcre/pcre_chartables.c b/erts/emulator/pcre/pcre_chartables.c
index b3d9020f25..06482c08d2 100644
--- a/erts/emulator/pcre/pcre_chartables.c
+++ b/erts/emulator/pcre/pcre_chartables.c
@@ -19,7 +19,9 @@ array definition from the final binary if PCRE is built into a static library
and dead code stripping is activated. This leads to link errors. Pulling in the
header ensures that the array gets flagged as "someone outside this compilation
unit might reference this" and so it will always be supplied to the linker. */
+
/* %ExternalCopyright% */
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
diff --git a/erts/emulator/pcre/pcre_compile.c b/erts/emulator/pcre/pcre_compile.c
index e79284ab79..ae7f6e2a2a 100644
--- a/erts/emulator/pcre/pcre_compile.c
+++ b/erts/emulator/pcre/pcre_compile.c
@@ -8061,7 +8061,7 @@ for (;; ptr++)
single group (i.e. not to a duplicated name. */
HANDLE_REFERENCE:
- if (firstcharflags == REQ_UNSET) firstcharflags = REQ_NONE;
+ if (firstcharflags == REQ_UNSET) zerofirstcharflags = firstcharflags = REQ_NONE;
previous = code;
item_hwm_offset = cd->hwm - cd->start_workspace;
*code++ = ((options & PCRE_CASELESS) != 0)? OP_REFI : OP_REF;
diff --git a/erts/emulator/pcre/pcre_dfa_exec.c b/erts/emulator/pcre/pcre_dfa_exec.c
index c859d67fc7..c101656fd7 100644
--- a/erts/emulator/pcre/pcre_dfa_exec.c
+++ b/erts/emulator/pcre/pcre_dfa_exec.c
@@ -2288,12 +2288,14 @@ for (;;)
case OP_NOTI:
if (clen > 0)
{
- unsigned int otherd;
+ pcre_uint32 otherd;
#ifdef SUPPORT_UTF
if (utf && d >= 128)
{
#ifdef SUPPORT_UCP
otherd = UCD_OTHERCASE(d);
+#else
+ otherd = d;
#endif /* SUPPORT_UCP */
}
else
diff --git a/erts/emulator/pcre/pcre_exec.c b/erts/emulator/pcre/pcre_exec.c
index 6708ba92a6..1946e97a72 100644
--- a/erts/emulator/pcre/pcre_exec.c
+++ b/erts/emulator/pcre/pcre_exec.c
@@ -6,7 +6,7 @@
and semantics are as close as possible to those of the Perl 5 language.
Written by Philip Hazel
- Copyright (c) 1997-2014 University of Cambridge
+ Copyright (c) 1997-2018 University of Cambridge
-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -2407,7 +2407,7 @@ for (;;)
case OP_ANY:
if (IS_NEWLINE(eptr)) RRETURN(MATCH_NOMATCH);
if (md->partial != 0 &&
- eptr + 1 >= md->end_subject &&
+ eptr == md->end_subject - 1 &&
NLBLOCK->nltype == NLTYPE_FIXED &&
NLBLOCK->nllen == 2 &&
UCHAR21TEST(eptr) == NLBLOCK->nl[0])
@@ -3167,7 +3167,7 @@ for (;;)
{
RMATCH(eptr, ecode, offset_top, md, eptrb, RM18);
if (rrc != MATCH_NOMATCH) RRETURN(rrc);
- if (eptr-- == pp) break; /* Stop if tried at original pos */
+ if (eptr-- <= pp) break; /* Stop if tried at original pos */
BACKCHAR(eptr);
}
}
@@ -3326,7 +3326,7 @@ for (;;)
{
RMATCH(eptr, ecode, offset_top, md, eptrb, RM21);
if (rrc != MATCH_NOMATCH) RRETURN(rrc);
- if (eptr-- == pp) break; /* Stop if tried at original pos */
+ if (eptr-- <= pp) break; /* Stop if tried at original pos */
#ifdef SUPPORT_UTF
if (utf) BACKCHAR(eptr);
#endif
diff --git a/erts/emulator/pcre/pcre_jit_compile.c b/erts/emulator/pcre/pcre_jit_compile.c
index 932ca2c389..926e40f6d3 100644
--- a/erts/emulator/pcre/pcre_jit_compile.c
+++ b/erts/emulator/pcre/pcre_jit_compile.c
@@ -164,7 +164,6 @@ typedef struct jit_arguments {
const pcre_uchar *begin;
const pcre_uchar *end;
int *offsets;
- pcre_uchar *uchar_ptr;
pcre_uchar *mark_ptr;
void *callout_data;
/* Everything else after. */
@@ -214,7 +213,7 @@ enum control_types {
type_then_trap = 1
};
-typedef int (SLJIT_CALL *jit_function)(jit_arguments *args);
+typedef int (SLJIT_FUNC *jit_function)(jit_arguments *args);
/* The following structure is the key data type for the recursive
code generator. It is allocated by compile_matchingpath, and contains
@@ -489,9 +488,24 @@ typedef struct compare_context {
/* Used for accessing the elements of the stack. */
#define STACK(i) ((i) * (int)sizeof(sljit_sw))
+#ifdef SLJIT_PREF_SHIFT_REG
+#if SLJIT_PREF_SHIFT_REG == SLJIT_R2
+/* Nothing. */
+#elif SLJIT_PREF_SHIFT_REG == SLJIT_R3
+#define SHIFT_REG_IS_R3
+#else
+#error "Unsupported shift register"
+#endif
+#endif
+
#define TMP1 SLJIT_R0
+#ifdef SHIFT_REG_IS_R3
+#define TMP2 SLJIT_R3
+#define TMP3 SLJIT_R2
+#else
#define TMP2 SLJIT_R2
#define TMP3 SLJIT_R3
+#endif
#define STR_PTR SLJIT_S0
#define STR_END SLJIT_S1
#define STACK_TOP SLJIT_R1
@@ -520,13 +534,10 @@ the start pointers when the end of the capturing group has not yet reached. */
#if defined COMPILE_PCRE8
#define MOV_UCHAR SLJIT_MOV_U8
-#define MOVU_UCHAR SLJIT_MOVU_U8
#elif defined COMPILE_PCRE16
#define MOV_UCHAR SLJIT_MOV_U16
-#define MOVU_UCHAR SLJIT_MOVU_U16
#elif defined COMPILE_PCRE32
#define MOV_UCHAR SLJIT_MOV_U32
-#define MOVU_UCHAR SLJIT_MOVU_U32
#else
#error Unsupported compiling mode
#endif
@@ -2383,12 +2394,25 @@ if (length < 8)
}
else
{
- GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START);
- OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
- loop = LABEL();
- OP1(SLJIT_MOVU, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw), SLJIT_R0, 0);
- OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
- JUMPTO(SLJIT_NOT_ZERO, loop);
+ if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS)
+ {
+ GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START);
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
+ loop = LABEL();
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
+ else
+ {
+ GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START + sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
+ loop = LABEL();
+ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), 0, SLJIT_R0, 0);
+ OP2(SLJIT_ADD, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
}
}
@@ -2421,12 +2445,25 @@ if (length < 8)
}
else
{
- GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw));
- OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
- loop = LABEL();
- OP1(SLJIT_MOVU, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP1, 0);
- OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
- JUMPTO(SLJIT_NOT_ZERO, loop);
+ if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS)
+ {
+ GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
+ loop = LABEL();
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
+ else
+ {
+ GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + 2 * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
+ loop = LABEL();
+ OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP1, 0);
+ OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, sizeof(sljit_sw));
+ OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
+ JUMPTO(SLJIT_NOT_ZERO, loop);
+ }
}
OP1(SLJIT_MOV, STACK_TOP, 0, ARGUMENTS, 0);
@@ -2436,10 +2473,10 @@ if (common->control_head_ptr != 0)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0);
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(jit_arguments, stack));
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->start_ptr);
-OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, base));
+OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), SLJIT_OFFSETOF(struct sljit_stack, end));
}
-static sljit_sw SLJIT_CALL do_search_mark(sljit_sw *current, const pcre_uchar *skip_arg)
+static sljit_sw SLJIT_FUNC do_search_mark(sljit_sw *current, const pcre_uchar *skip_arg)
{
while (current != NULL)
{
@@ -2460,7 +2497,7 @@ while (current != NULL)
SLJIT_ASSERT(current[0] == 0 || current < (sljit_sw*)current[0]);
current = (sljit_sw*)current[0];
}
-return -1;
+return 0;
}
static SLJIT_INLINE void copy_ovector(compiler_common *common, int topbracket)
@@ -2468,6 +2505,7 @@ static SLJIT_INLINE void copy_ovector(compiler_common *common, int topbracket)
DEFINE_COMPILER;
struct sljit_label *loop;
struct sljit_jump *early_quit;
+BOOL has_pre;
/* At this point we can freely use all registers. */
OP1(SLJIT_MOV, SLJIT_S2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(1));
@@ -2481,17 +2519,30 @@ if (common->mark_ptr != 0)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, mark_ptr), SLJIT_R2, 0);
OP2(SLJIT_SUB, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, offsets), SLJIT_IMM, sizeof(int));
OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, begin));
-GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START);
+
+has_pre = sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS;
+GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START - (has_pre ? sizeof(sljit_sw) : 0));
+
/* Unlikely, but possible */
early_quit = CMP(SLJIT_EQUAL, SLJIT_R1, 0, SLJIT_IMM, 0);
loop = LABEL();
-OP2(SLJIT_SUB, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0, SLJIT_R0, 0);
-OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw));
+
+if (has_pre)
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw));
+else
+ {
+ OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0);
+ OP2(SLJIT_ADD, SLJIT_S0, 0, SLJIT_S0, 0, SLJIT_IMM, sizeof(sljit_sw));
+ }
+
+OP2(SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, sizeof(int));
+OP2(SLJIT_SUB, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_R0, 0);
/* Copy the integer value to the output buffer */
#if defined COMPILE_PCRE16 || defined COMPILE_PCRE32
OP2(SLJIT_ASHR, SLJIT_S1, 0, SLJIT_S1, 0, SLJIT_IMM, UCHAR_SHIFT);
#endif
-OP1(SLJIT_MOVU_S32, SLJIT_MEM1(SLJIT_R2), sizeof(int), SLJIT_S1, 0);
+
+OP1(SLJIT_MOV_S32, SLJIT_MEM1(SLJIT_R2), 0, SLJIT_S1, 0);
OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
JUMPTO(SLJIT_NOT_ZERO, loop);
JUMPHERE(early_quit);
@@ -2499,14 +2550,29 @@ JUMPHERE(early_quit);
/* Calculate the return value, which is the maximum ovector value. */
if (topbracket > 1)
{
- GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw));
- OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1);
+ if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))) == SLJIT_SUCCESS)
+ {
+ GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1);
- /* OVECTOR(0) is never equal to SLJIT_S2. */
- loop = LABEL();
- OP1(SLJIT_MOVU, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw)));
- OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
- CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
+ /* OVECTOR(0) is never equal to SLJIT_S2. */
+ loop = LABEL();
+ sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw)));
+ OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
+ CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
+ }
+ else
+ {
+ GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + (topbracket - 1) * 2 * sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1);
+
+ /* OVECTOR(0) is never equal to SLJIT_S2. */
+ loop = LABEL();
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0);
+ OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * (sljit_sw)sizeof(sljit_sw));
+ OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
+ CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
+ }
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0);
}
else
@@ -5167,93 +5233,190 @@ OP_FLAGS(SLJIT_OR | SLJIT_SET_Z, TMP2, 0, SLJIT_EQUAL);
sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
}
-#define CHAR1 STR_END
-#define CHAR2 STACK_TOP
-
static void do_casefulcmp(compiler_common *common)
{
DEFINE_COMPILER;
struct sljit_jump *jump;
struct sljit_label *label;
+int char1_reg;
+int char2_reg;
-sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
+if (sljit_get_register_index(TMP3) < 0)
+ {
+ char1_reg = STR_END;
+ char2_reg = STACK_TOP;
+ }
+else
+ {
+ char1_reg = TMP3;
+ char2_reg = RETURN_ADDR;
+ }
+
+sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0);
-OP1(SLJIT_MOV, TMP3, 0, CHAR1, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, CHAR2, 0);
-OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
-OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
-label = LABEL();
-OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1));
-OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
-jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0);
-OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
-JUMPTO(SLJIT_NOT_ZERO, label);
+if (char1_reg == STR_END)
+ {
+ OP1(SLJIT_MOV, TMP3, 0, char1_reg, 0);
+ OP1(SLJIT_MOV, RETURN_ADDR, 0, char2_reg, 0);
+ }
-JUMPHERE(jump);
-OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
-OP1(SLJIT_MOV, CHAR1, 0, TMP3, 0);
-OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
-sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
-}
+if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ {
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+ JUMPTO(SLJIT_NOT_ZERO, label);
+
+ JUMPHERE(jump);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ }
+else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ {
+ OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+ JUMPTO(SLJIT_NOT_ZERO, label);
-#define LCC_TABLE STACK_LIMIT
+ JUMPHERE(jump);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+ }
+else
+ {
+ label = LABEL();
+ OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0);
+ OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0);
+ OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+ jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
+ OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
+ JUMPTO(SLJIT_NOT_ZERO, label);
+
+ JUMPHERE(jump);
+ OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ }
+
+if (char1_reg == STR_END)
+ {
+ OP1(SLJIT_MOV, char1_reg, 0, TMP3, 0);
+ OP1(SLJIT_MOV, char2_reg, 0, RETURN_ADDR, 0);
+ }
+
+sljit_emit_fast_return(compiler, TMP1, 0);
+}
static void do_caselesscmp(compiler_common *common)
{
DEFINE_COMPILER;
struct sljit_jump *jump;
struct sljit_label *label;
+int char1_reg = STR_END;
+int char2_reg;
+int lcc_table;
+int opt_type = 0;
-sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);
+if (sljit_get_register_index(TMP3) < 0)
+ {
+ char2_reg = STACK_TOP;
+ lcc_table = STACK_LIMIT;
+ }
+else
+ {
+ char2_reg = RETURN_ADDR;
+ lcc_table = TMP3;
+ }
+
+if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ opt_type = 1;
+else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
+ opt_type = 2;
+
+sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0);
-OP1(SLJIT_MOV, TMP3, 0, LCC_TABLE, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, CHAR1, 0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, CHAR2, 0);
-OP1(SLJIT_MOV, LCC_TABLE, 0, SLJIT_IMM, common->lcc);
-OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
-OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, char1_reg, 0);
+
+if (char2_reg == STACK_TOP)
+ {
+ OP1(SLJIT_MOV, TMP3, 0, char2_reg, 0);
+ OP1(SLJIT_MOV, RETURN_ADDR, 0, lcc_table, 0);
+ }
+
+OP1(SLJIT_MOV, lcc_table, 0, SLJIT_IMM, common->lcc);
+
+if (opt_type == 1)
+ {
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ }
+else if (opt_type == 2)
+ {
+ OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+
+ label = LABEL();
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
+ sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
+ }
+else
+ {
+ label = LABEL();
+ OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0);
+ OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0);
+ OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
+ }
-label = LABEL();
-OP1(MOVU_UCHAR, CHAR1, 0, SLJIT_MEM1(TMP1), IN_UCHARS(1));
-OP1(MOVU_UCHAR, CHAR2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
#ifndef COMPILE_PCRE8
-jump = CMP(SLJIT_GREATER, CHAR1, 0, SLJIT_IMM, 255);
+jump = CMP(SLJIT_GREATER, char1_reg, 0, SLJIT_IMM, 255);
#endif
-OP1(SLJIT_MOV_U8, CHAR1, 0, SLJIT_MEM2(LCC_TABLE, CHAR1), 0);
+OP1(SLJIT_MOV_U8, char1_reg, 0, SLJIT_MEM2(lcc_table, char1_reg), 0);
#ifndef COMPILE_PCRE8
JUMPHERE(jump);
-jump = CMP(SLJIT_GREATER, CHAR2, 0, SLJIT_IMM, 255);
+jump = CMP(SLJIT_GREATER, char2_reg, 0, SLJIT_IMM, 255);
#endif
-OP1(SLJIT_MOV_U8, CHAR2, 0, SLJIT_MEM2(LCC_TABLE, CHAR2), 0);
+OP1(SLJIT_MOV_U8, char2_reg, 0, SLJIT_MEM2(lcc_table, char2_reg), 0);
#ifndef COMPILE_PCRE8
JUMPHERE(jump);
#endif
-jump = CMP(SLJIT_NOT_EQUAL, CHAR1, 0, CHAR2, 0);
+
+if (opt_type == 0)
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
+
+jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
JUMPTO(SLJIT_NOT_ZERO, label);
JUMPHERE(jump);
-OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
-OP1(SLJIT_MOV, LCC_TABLE, 0, TMP3, 0);
-OP1(SLJIT_MOV, CHAR1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
-OP1(SLJIT_MOV, CHAR2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
-sljit_emit_fast_return(compiler, RETURN_ADDR, 0);
-}
+OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+
+if (opt_type == 2)
+ OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
-#undef LCC_TABLE
-#undef CHAR1
-#undef CHAR2
+if (char2_reg == STACK_TOP)
+ {
+ OP1(SLJIT_MOV, char2_reg, 0, TMP3, 0);
+ OP1(SLJIT_MOV, lcc_table, 0, RETURN_ADDR, 0);
+ }
+
+OP1(SLJIT_MOV, char1_reg, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
+sljit_emit_fast_return(compiler, TMP1, 0);
+}
#if defined SUPPORT_UTF && defined SUPPORT_UCP
-static const pcre_uchar * SLJIT_CALL do_utf_caselesscmp(pcre_uchar *src1, jit_arguments *args, pcre_uchar *end1)
+static const pcre_uchar * SLJIT_FUNC do_utf_caselesscmp(pcre_uchar *src1, pcre_uchar *src2, pcre_uchar *end1, pcre_uchar *end2)
{
/* This function would be ineffective to do in JIT level. */
sljit_u32 c1, c2;
-const pcre_uchar *src2 = args->uchar_ptr;
-const pcre_uchar *end2 = args->end;
const ucd_record *ur;
const sljit_u32 *pp;
@@ -6776,32 +6939,37 @@ else
#if defined SUPPORT_UTF && defined SUPPORT_UCP
if (common->utf && *cc == OP_REFI)
{
- SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1 && TMP2 == SLJIT_R2);
+ SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1);
if (ref)
- OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1));
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1));
else
- OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
+ OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
if (withchecks)
- jump = CMP(SLJIT_EQUAL, TMP1, 0, TMP2, 0);
+ jump = CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_R2, 0);
- /* Needed to save important temporary registers. */
+ /* No free saved registers so save data on stack. */
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
- OP1(SLJIT_MOV, SLJIT_R1, 0, ARGUMENTS, 0);
- OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_R1), SLJIT_OFFSETOF(jit_arguments, uchar_ptr), STR_PTR, 0);
- sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp));
+ OP1(SLJIT_MOV, SLJIT_R1, 0, STR_PTR, 0);
+ OP1(SLJIT_MOV, SLJIT_R3, 0, STR_END, 0);
+ sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW) | SLJIT_ARG4(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_utf_caselesscmp));
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0);
+
if (common->mode == JIT_COMPILE)
add_jump(compiler, backtracks, CMP(SLJIT_LESS_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1));
else
{
- add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0));
- nopartial = CMP(SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1);
+ OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_LESS, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 1);
+
+ add_jump(compiler, backtracks, JUMP(SLJIT_LESS));
+
+ nopartial = JUMP(SLJIT_NOT_EQUAL);
+ OP1(SLJIT_MOV, STR_PTR, 0, STR_END, 0);
check_partial(common, FALSE);
add_jump(compiler, backtracks, JUMP(SLJIT_JUMP));
JUMPHERE(nopartial);
}
- OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_RETURN_REG, 0);
}
else
#endif /* SUPPORT_UTF && SUPPORT_UCP */
@@ -7125,7 +7293,7 @@ add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IM
return cc + 1 + LINK_SIZE;
}
-static int SLJIT_CALL do_callout(struct jit_arguments *arguments, PUBL(callout_block) *callout_block, pcre_uchar **jit_ovector)
+static sljit_s32 SLJIT_FUNC do_callout(struct jit_arguments *arguments, PUBL(callout_block) *callout_block, pcre_uchar **jit_ovector)
{
const pcre_uchar *begin = arguments->begin;
int *offset_vector = arguments->offsets;
@@ -7207,18 +7375,17 @@ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
/* SLJIT_R0 = arguments */
OP1(SLJIT_MOV, SLJIT_R1, 0, STACK_TOP, 0);
GET_LOCAL_BASE(SLJIT_R2, 0, OVECTOR_START);
-sljit_emit_ijump(compiler, SLJIT_CALL3, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout));
-OP1(SLJIT_MOV_S32, SLJIT_RETURN_REG, 0, SLJIT_RETURN_REG, 0);
+sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(S32) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW) | SLJIT_ARG3(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_callout));
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
free_stack(common, CALLOUT_ARG_SIZE / sizeof(sljit_sw));
/* Check return value. */
-OP2(SLJIT_SUB | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
-add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER));
+OP2(SLJIT_SUB32 | SLJIT_SET_Z | SLJIT_SET_SIG_GREATER, SLJIT_UNUSED, 0, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
+add_jump(compiler, &backtrack->topbacktracks, JUMP(SLJIT_SIG_GREATER32));
if (common->forced_quit_label == NULL)
- add_jump(compiler, &common->forced_quit, JUMP(SLJIT_NOT_EQUAL) /* SIG_LESS */);
+ add_jump(compiler, &common->forced_quit, JUMP(SLJIT_NOT_EQUAL32) /* SIG_LESS */);
else
- JUMPTO(SLJIT_NOT_EQUAL /* SIG_LESS */, common->forced_quit_label);
+ JUMPTO(SLJIT_NOT_EQUAL32 /* SIG_LESS */, common->forced_quit_label);
return cc + 2 + 2 * LINK_SIZE;
}
@@ -10439,11 +10606,11 @@ if (opcode == OP_SKIP_ARG)
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STACK_TOP, 0);
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, (sljit_sw)(current->cc + 2));
- sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark));
+ sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(do_search_mark));
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
OP1(SLJIT_MOV, STR_PTR, 0, TMP1, 0);
- add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, -1));
+ add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, SLJIT_IMM, 0));
return;
}
@@ -11031,7 +11198,7 @@ if (!compiler)
common->compiler = compiler;
/* Main pcre_jit_exec entry. */
-sljit_emit_enter(compiler, 0, 1, 5, 5, 0, 0, private_data_size);
+sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW), 5, 5, 0, 0, private_data_size);
/* Register init. */
reset_ovector(common, (re->top_bracket + 1) * 2);
@@ -11044,8 +11211,8 @@ OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str))
OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, end));
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, limit_match));
-OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, base));
-OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, limit));
+OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, end));
+OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, start));
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LIMIT_MATCH, TMP1, 0);
@@ -11251,20 +11418,22 @@ common->quit_label = quit_label;
set_jumps(common->stackalloc, LABEL());
/* RETURN_ADDR is not a saved register. */
sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
-OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, TMP2, 0);
-OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0);
-OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
-OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top), STACK_TOP, 0);
-OP2(SLJIT_SUB, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit), SLJIT_IMM, STACK_GROWTH_RATE);
-sljit_emit_ijump(compiler, SLJIT_CALL2, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize));
-jump = CMP(SLJIT_NOT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
-OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0);
-OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
-OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, top));
-OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(struct sljit_stack, limit));
-OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
-sljit_emit_fast_return(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+SLJIT_ASSERT(TMP1 == SLJIT_R0 && STACK_TOP == SLJIT_R1);
+
+OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, STACK_TOP, 0);
+OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0);
+OP2(SLJIT_SUB, SLJIT_R1, 0, STACK_LIMIT, 0, SLJIT_IMM, STACK_GROWTH_RATE);
+OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, stack));
+OP1(SLJIT_MOV, STACK_LIMIT, 0, TMP2, 0);
+
+sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_RET(SW) | SLJIT_ARG1(SW) | SLJIT_ARG2(SW), SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_stack_resize));
+jump = CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
+OP1(SLJIT_MOV, TMP2, 0, STACK_LIMIT, 0);
+OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_RETURN_REG, 0);
+OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
+OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
+sljit_emit_fast_return(compiler, TMP1, 0);
/* Allocation failed. */
JUMPHERE(jump);
@@ -11409,9 +11578,9 @@ union {
sljit_u8 local_space[MACHINE_STACK_SIZE];
struct sljit_stack local_stack;
-local_stack.max_limit = local_space;
-local_stack.limit = local_space;
-local_stack.base = local_space + MACHINE_STACK_SIZE;
+local_stack.min_start = local_space;
+local_stack.start = local_space;
+local_stack.end = local_space + MACHINE_STACK_SIZE;
local_stack.top = local_space + MACHINE_STACK_SIZE;
arguments->stack = &local_stack;
convert_executable_func.executable_func = executable_func;
@@ -11536,7 +11705,7 @@ if ((options & PCRE_PARTIAL_HARD) != 0)
else if ((options & PCRE_PARTIAL_SOFT) != 0)
mode = JIT_PARTIAL_SOFT_COMPILE;
-if (functions->executable_funcs[mode] == NULL)
+if (functions == NULL || functions->executable_funcs[mode] == NULL)
return PCRE_ERROR_JIT_BADOPTION;
/* Sanity checks should be handled by pcre_exec. */
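[Note] The pcre_jit_compile.c hunks above track an sljit API change: the JIT entry now declares its argument type with SLJIT_ARG1(SW), the stack-resize helper is reached through sljit_emit_icall with explicit argument/return types, and the stack descriptor's fields were renamed. As a rough orientation only, the old-to-new field mapping can be read off the local_stack initialisation above; the struct below is a hypothetical sketch for illustration (field names taken from the hunks, the struct name and exact layout are assumptions, not part of the patch):

/* Hypothetical sketch of the renamed sljit stack fields.
 * The machine stack grows downwards, so "end" is the highest address. */
struct sljit_stack_sketch {
    sljit_u8 *min_start; /* lowest address the stack may ever reach (was max_limit) */
    sljit_u8 *start;     /* current lowest usable address           (was limit)     */
    sljit_u8 *end;       /* highest address, the initial top        (was base)      */
    sljit_u8 *top;       /* current top of the stack                                */
};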
diff --git a/erts/emulator/pcre/pcre_latin_1_table.c b/erts/emulator/pcre/pcre_latin_1_table.c
index aa29275a94..d6cf38fa3b 100644
--- a/erts/emulator/pcre/pcre_latin_1_table.c
+++ b/erts/emulator/pcre/pcre_latin_1_table.c
@@ -14,6 +14,7 @@ Pulling in the header ensures that the array gets flagged as "someone
outside this compilation unit might reference this" and so it will always
be supplied to the linker. */
/* %ExternalCopyright% */
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
@@ -120,7 +121,7 @@ print, punct, and cntrl. Other classes are built from combinations. */
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xfe,0xff,0xff,0x07,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x04,0x20,0x04,
0x00,0x00,0x00,0x80,0xff,0xff,0x7f,0xff,
0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x03,
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 9f115706dc..f421794f91 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -916,7 +916,7 @@ enif_select(ErlNifEnv* env,
ctl_op = ERTS_POLL_OP_DEL;
}
else {
- on = 1;
+ on = !(mode & ERL_NIF_SELECT_CANCEL);
ASSERT(mode);
if (mode & ERL_DRV_READ) {
ctl_events |= ERTS_POLL_EV_IN;
@@ -1042,38 +1042,51 @@ enif_select(ErlNifEnv* env,
ret = 0;
}
else { /* off */
+ ret = 0;
if (state->type == ERTS_EV_TYPE_NIF) {
- state->driver.nif->in.pid = NIL;
- state->driver.nif->out.pid = NIL;
- }
- ASSERT(state->events==0);
- if (!wake_poller) {
- /*
- * Safe to close fd now as it is not in pollset
- * or there was no need to eject fd (kernel poll)
- */
- if (state->type == ERTS_EV_TYPE_NIF) {
- ASSERT(state->driver.stop.resource == resource);
- call_stop = CALL_STOP_AND_RELEASE;
- state->driver.stop.resource = NULL;
+ if (mode & ERL_NIF_SELECT_READ
+ && is_not_nil(state->driver.nif->in.pid)) {
+ state->driver.nif->in.pid = NIL;
+ ret |= ERL_NIF_SELECT_READ_CANCELLED;
}
- else {
- ASSERT(!state->driver.stop.resource);
- call_stop = CALL_STOP;
+ if (mode & ERL_NIF_SELECT_WRITE
+ && is_not_nil(state->driver.nif->out.pid)) {
+ state->driver.nif->out.pid = NIL;
+ ret |= ERL_NIF_SELECT_WRITE_CANCELLED;
}
- state->type = ERTS_EV_TYPE_NONE;
- ret = ERL_NIF_SELECT_STOP_CALLED;
}
- else {
- /* Not safe to close fd, postpone stop_select callback. */
- if (state->type == ERTS_EV_TYPE_NONE) {
- ASSERT(!state->driver.stop.resource);
- state->driver.stop.resource = resource;
- enif_keep_resource(resource);
+ if (mode & ERL_NIF_SELECT_STOP) {
+ ASSERT(state->events==0);
+ if (!wake_poller) {
+ /*
+ * Safe to close fd now as it is not in pollset
+ * or there was no need to eject fd (kernel poll)
+ */
+ if (state->type == ERTS_EV_TYPE_NIF) {
+ ASSERT(state->driver.stop.resource == resource);
+ call_stop = CALL_STOP_AND_RELEASE;
+ state->driver.stop.resource = NULL;
+ }
+ else {
+ ASSERT(!state->driver.stop.resource);
+ call_stop = CALL_STOP;
+ }
+ state->type = ERTS_EV_TYPE_NONE;
+ ret |= ERL_NIF_SELECT_STOP_CALLED;
+ }
+ else {
+ /* Not safe to close fd, postpone stop_select callback. */
+ if (state->type == ERTS_EV_TYPE_NONE) {
+ ASSERT(!state->driver.stop.resource);
+ state->driver.stop.resource = resource;
+ enif_keep_resource(resource);
+ }
+ state->type = ERTS_EV_TYPE_STOP_NIF;
+ ret |= ERL_NIF_SELECT_STOP_SCHEDULED;
}
- state->type = ERTS_EV_TYPE_STOP_NIF;
- ret = ERL_NIF_SELECT_STOP_SCHEDULED;
}
+ else
+ ASSERT(mode & ERL_NIF_SELECT_CANCEL);
}
done:
@@ -1251,7 +1264,8 @@ print_nif_select_op(erts_dsprintf_buf_t *dsbufp,
(int) fd,
mode & ERL_NIF_SELECT_READ ? " READ" : "",
mode & ERL_NIF_SELECT_WRITE ? " WRITE" : "",
- mode & ERL_NIF_SELECT_STOP ? " STOP" : "",
+ (mode & ERL_NIF_SELECT_STOP ? " STOP"
+ : (mode & ERL_NIF_SELECT_CANCEL ? " CANCEL" : "")),
resource->type->module,
resource->type->name,
ref);
@@ -2332,10 +2346,16 @@ drvmode2str(int mode) {
static ERTS_INLINE char *
nifmode2str(enum ErlNifSelectFlags mode) {
+ if (mode & ERL_NIF_SELECT_STOP)
+ return "STOP";
switch (mode) {
case ERL_NIF_SELECT_READ: return "READ";
case ERL_NIF_SELECT_WRITE: return "WRITE";
- case ERL_NIF_SELECT_STOP: return "STOP";
+ case ERL_NIF_SELECT_READ|ERL_NIF_SELECT_WRITE: return "READ|WRITE";
+ case ERL_NIF_SELECT_CANCEL|ERL_NIF_SELECT_READ: return "CANCEL|READ";
+ case ERL_NIF_SELECT_CANCEL|ERL_NIF_SELECT_WRITE: return "CANCEL|WRITE";
+ case ERL_NIF_SELECT_CANCEL|ERL_NIF_SELECT_READ|ERL_NIF_SELECT_WRITE:
+ return "CANCEL|READ|WRITE";
default: return "UNKNOWN";
}
}
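[Note] The enif_select() changes above introduce a CANCEL mode: besides turning monitoring on (READ/WRITE) or tearing the fd down (STOP), a NIF can now withdraw a pending read and/or write select, and the return value reports whether anything was actually pending. Below is a minimal sketch of how a NIF might use it; the resource struct, field names and wrapper function are hypothetical, only the flags and enif_select() itself come from the patch:

#include <erl_nif.h>

typedef struct { int fd; } my_fd_res;   /* hypothetical NIF resource holding an fd */

/* Withdraw a previously requested read select on res->fd.
 * Returns nonzero if a read select was actually pending. */
static int cancel_read(ErlNifEnv* env, my_fd_res* res, ERL_NIF_TERM ref)
{
    int rv = enif_select(env, (ErlNifEvent) res->fd,
                         ERL_NIF_SELECT_READ | ERL_NIF_SELECT_CANCEL,
                         res, NULL /* calling process */, ref);
    if (rv < 0)
        return 0;   /* error: ERL_NIF_SELECT_INVALID_EVENT or ERL_NIF_SELECT_FAILED */
    return (rv & ERL_NIF_SELECT_READ_CANCELLED) != 0;
}

The nif_SUITE additions further down exercise exactly this pattern: cancelling on an idle fd returns 0, while cancelling after a select returns the corresponding *_CANCELLED flag and suppresses the ready_input/ready_output message.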
diff --git a/erts/emulator/sys/common/erl_osenv.c b/erts/emulator/sys/common/erl_osenv.c
index 487ff87116..6a16377736 100644
--- a/erts/emulator/sys/common/erl_osenv.c
+++ b/erts/emulator/sys/common/erl_osenv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2017. All Rights Reserved.
+ * Copyright Ericsson AB 2017-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/erts/emulator/sys/unix/erl_child_setup.c b/erts/emulator/sys/unix/erl_child_setup.c
index 221ee2a69d..129861ebd5 100644
--- a/erts/emulator/sys/unix/erl_child_setup.c
+++ b/erts/emulator/sys/unix/erl_child_setup.c
@@ -133,6 +133,7 @@ static int sigchld_pipe[2];
static int
start_new_child(int pipes[])
{
+ struct sigaction sa;
int errln = -1;
int size, res, i, pos = 0;
char *buff, *o_buff;
@@ -143,6 +144,16 @@ start_new_child(int pipes[])
/* only child executes here */
+ /* Restore default handling of sigterm... */
+ sa.sa_handler = SIG_DFL;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = 0;
+
+ if (sigaction(SIGTERM, &sa, 0) == -1) {
+ perror(NULL);
+ exit(1);
+ }
+
do {
res = read(pipes[0], (char*)&size, sizeof(size));
} while(res < 0 && (errno == EINTR || errno == ERRNO_BLOCK));
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 36579ffdb4..4823e549ea 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2017. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index fcf5a0d533..39bb4d515e 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2017. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index a2f3489943..edad62a9fb 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -485,12 +485,14 @@ t_on_load(Config) when is_list(Config) ->
-define(ERL_NIF_SELECT_READ, (1 bsl 0)).
-define(ERL_NIF_SELECT_WRITE, (1 bsl 1)).
-define(ERL_NIF_SELECT_STOP, (1 bsl 2)).
+-define(ERL_NIF_SELECT_CANCEL, (1 bsl 3)).
-define(ERL_NIF_SELECT_STOP_CALLED, (1 bsl 0)).
-define(ERL_NIF_SELECT_STOP_SCHEDULED, (1 bsl 1)).
-define(ERL_NIF_SELECT_INVALID_EVENT, (1 bsl 2)).
-define(ERL_NIF_SELECT_FAILED, (1 bsl 3)).
-
+-define(ERL_NIF_SELECT_READ_CANCELLED, (1 bsl 4)).
+-define(ERL_NIF_SELECT_WRITE_CANCELLED, (1 bsl 5)).
select(Config) when is_list(Config) ->
ensure_lib_loaded(Config),
@@ -516,7 +518,16 @@ select(Config) when is_list(Config) ->
end),
0 = select_nif(R,?ERL_NIF_SELECT_READ,R,Pid,Ref),
{Pid, done} = receive_any(1000),
+
+ %% Cancel read
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ bor ?ERL_NIF_SELECT_CANCEL,R,null,Ref),
<<"hej">> = read_nif(R, 3),
+ 0 = select_nif(R,?ERL_NIF_SELECT_READ,R,null,Ref),
+ ?ERL_NIF_SELECT_READ_CANCELLED =
+ select_nif(R,?ERL_NIF_SELECT_READ bor ?ERL_NIF_SELECT_CANCEL,R,null,Ref),
+ ok = write_nif(W, <<"hej again">>),
+ [] = flush(0),
+ <<"hej again">> = read_nif(R, 9),
%% Wait for write
Written = write_full(W, $a),
@@ -525,6 +536,15 @@ select(Config) when is_list(Config) ->
Written = read_nif(R,byte_size(Written)),
[{select, W, Ref, ready_output}] = flush(),
+ %% Cancel write
+ 0 = select_nif(W,?ERL_NIF_SELECT_WRITE bor ?ERL_NIF_SELECT_CANCEL,W,null,Ref),
+ Written2 = write_full(W, $b),
+ 0 = select_nif(W,?ERL_NIF_SELECT_WRITE,W,null,Ref),
+ ?ERL_NIF_SELECT_WRITE_CANCELLED =
+ select_nif(W,?ERL_NIF_SELECT_WRITE bor ?ERL_NIF_SELECT_CANCEL,W,null,Ref),
+ Written2 = read_nif(R,byte_size(Written2)),
+ [] = flush(0),
+
%% Close write and wait for EOF
eagain = read_nif(R, 1),
check_stop_ret(select_nif(W,?ERL_NIF_SELECT_STOP,W,null,Ref)),
diff --git a/erts/emulator/test/node_container_SUITE.erl b/erts/emulator/test/node_container_SUITE.erl
index 7df001fec5..300b4ed036 100644
--- a/erts/emulator/test/node_container_SUITE.erl
+++ b/erts/emulator/test/node_container_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2002-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2002-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -50,7 +50,8 @@
bad_nc/1,
unique_pid/1,
iter_max_procs/1,
- magic_ref/1]).
+ magic_ref/1,
+ dist_entry_gc/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -58,7 +59,7 @@ suite() ->
all() ->
- [term_to_binary_to_term_eq, round_trip_eq, cmp, ref_eq,
+ [dist_entry_gc, term_to_binary_to_term_eq, round_trip_eq, cmp, ref_eq,
node_table_gc, dist_link_refc, dist_monitor_refc,
node_controller_refc, ets_refc, match_spec_refc,
timer_refc, pid_wrap, port_wrap, bad_nc,
@@ -894,6 +895,29 @@ magic_ref(Config) when is_list(Config) ->
true = is_reference(MRef2),
true = erts_debug:get_internal_state({magic_ref,MRef2}),
ok.
+
+
+lost_pending_connection(Node) ->
+ _ = (catch erts_internal:new_connection(Node)),
+ ok.
+
+dist_entry_gc(Config) when is_list(Config) ->
+ Me = self(),
+ {ok, Node} = start_node(get_nodefirstname(), "+zdntgc 0"),
+ P = spawn_link(Node,
+ fun () ->
+ LostNode = list_to_atom("lost_pending_connection@" ++ hostname()),
+ lost_pending_connection(LostNode),
+ garbage_collect(), %% Could crash...
+ Me ! {self(), ok}
+ end),
+ receive
+ {P, ok} -> ok
+ end,
+ unlink(P),
+ stop_node(Node),
+ ok.
+
%%
%% -- Internal utils ---------------------------------------------------------
%%
diff --git a/erts/emulator/test/ref_SUITE.erl b/erts/emulator/test/ref_SUITE.erl
index 74df857c65..925c30caa5 100644
--- a/erts/emulator/test/ref_SUITE.erl
+++ b/erts/emulator/test/ref_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index 7eebbe8b19..f04efb9003 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -2155,7 +2155,7 @@ workers_exit([Ps|Pss]) ->
workers_exit(Pss).
do_work(PartTime) ->
- lists:reverse(lists:seq(1, 50)),
+ _ = id(lists:seq(1, 50)),
receive stop_work -> receive after infinity -> ok end after 0 -> ok end,
case PartTime of
true -> receive after 1 -> ok end;
@@ -2163,6 +2163,8 @@ do_work(PartTime) ->
end,
do_work(PartTime).
+id(I) -> I.
+
workers(N, _Prio, _PartTime) when N =< 0 ->
[];
workers(N, Prio, PartTime) ->
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 3316e4348c..df611f2bb0 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index c30c589d3a..4a345f8152 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/src/erts.app.src b/erts/preloaded/src/erts.app.src
index 1ccc4988c2..8c34c99a98 100644
--- a/erts/preloaded/src/erts.app.src
+++ b/erts/preloaded/src/erts.app.src
@@ -38,7 +38,7 @@
{registered, []},
{applications, []},
{env, []},
- {runtime_dependencies, ["stdlib-3.5", "kernel-6.0", "sasl-3.0.1"]}
+ {runtime_dependencies, ["stdlib-3.5", "kernel-6.1", "sasl-3.0.1"]}
]}.
%% vim: ft=erlang
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index 517ca74301..6d85868183 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -786,7 +786,7 @@ altname_nif(_Path) ->
%% We know for certain that lists:reverse/2 is a BIF, so it's safe to use it
%% even though this module is preloaded.
-reverse_list(List) -> lists:reverse(List).
+reverse_list(List) -> lists:reverse(List, []).
proplist_get_value(_Key, [], Default) ->
Default;
diff --git a/erts/preloaded/src/prim_inet.erl b/erts/preloaded/src/prim_inet.erl
index bcb54cca42..b746aab049 100644
--- a/erts/preloaded/src/prim_inet.erl
+++ b/erts/preloaded/src/prim_inet.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@
-export([open/3, open/4, fdopen/4, fdopen/5, close/1]).
-export([bind/3, listen/1, listen/2, peeloff/2]).
-export([connect/3, connect/4, async_connect/4]).
--export([accept/1, accept/2, async_accept/2]).
+-export([accept/1, accept/2, accept/3, async_accept/2]).
-export([shutdown/2]).
-export([send/2, send/3, sendto/4, sendmsg/3, sendfile/4]).
-export([recv/2, recv/3, async_recv/3]).
@@ -307,7 +307,7 @@ async_connect0(S, Addr, Time) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
-%% ACCEPT(insock() [,Timeout] ) -> {ok,insock()} | {error, Reason}
+%% ACCEPT(insock() [,Timeout][,FamilyOpts] ) -> {ok,insock()} | {error, Reason}
%%
%% accept incoming connection on listen socket
%% if timeout is given:
@@ -315,6 +315,8 @@ async_connect0(S, Addr, Time) ->
%% 0 -> immediate accept (poll)
%% > 0 -> wait for timeout ms for accept if no accept then
%% return {error, timeout}
+%% FamilyOpts are address family specific options to copy from
+%% listen socket to accepted socket
%%
%% ASYNC_ACCEPT(insock(), Timeout)
%%
@@ -325,17 +327,22 @@ async_connect0(S, Addr, Time) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% For TCP sockets only.
%%
-accept(L) -> accept0(L, -1).
+accept(L) -> accept0(L, -1, []).
-accept(L, infinity) -> accept0(L, -1);
-accept(L, Time) -> accept0(L, Time).
+accept(L, infinity) -> accept0(L, -1, []);
+accept(L, FamilyOpts) when is_list(FamilyOpts) -> accept0(L, -1, FamilyOpts);
+accept(L, Time) -> accept0(L, Time, []).
-accept0(L, Time) when is_port(L), is_integer(Time) ->
+accept(L, infinity, FamilyOpts) -> accept0(L, -1, FamilyOpts);
+accept(L, Time, FamilyOpts) -> accept0(L, Time, FamilyOpts).
+
+accept0(L, Time, FamilyOpts)
+ when is_port(L), is_integer(Time), is_list(FamilyOpts) ->
case async_accept(L, Time) of
{ok, Ref} ->
receive
{inet_async, L, Ref, {ok,S}} ->
- accept_opts(L, S);
+ accept_opts(L, S, FamilyOpts);
{inet_async, L, Ref, Error} ->
Error
end;
@@ -343,25 +350,22 @@ accept0(L, Time) when is_port(L), is_integer(Time) ->
end.
%% setup options from listen socket on the connected socket
-accept_opts(L, S) ->
- case getopts(L, [active, nodelay, keepalive, delay_send, priority, tos]) of
+accept_opts(L, S, FamilyOpts) ->
+ case
+ getopts(
+ L,
+ [active, nodelay, keepalive, delay_send, priority]
+ ++ FamilyOpts)
+ of
{ok, Opts} ->
- case setopts(S, Opts) of
- ok ->
- case getopts(L, [tclass]) of
- {ok, []} ->
- {ok, S};
- {ok, TClassOpts} ->
- case setopts(S, TClassOpts) of
- ok ->
- {ok, S};
- Error -> close(S), Error
- end
- end;
- Error -> close(S), Error
- end;
- Error ->
- close(S), Error
+ case setopts(S, Opts) of
+ ok ->
+ {ok, S};
+ Error1 ->
+ close(S), Error1
+ end;
+ Error2 ->
+ close(S), Error2
end.
async_accept(L, Time) ->
@@ -616,7 +620,16 @@ recvfrom0(S, Length, Time)
Ref = ?u16(R1,R0),
receive
% Success, UDP:
+ {inet_async, S, Ref, {ok, {[F | AddrData], AncData}}} ->
+ %% With ancillary data
+ case get_addr(F, AddrData) of
+ {{Family, _} = Addr, Data} when is_atom(Family) ->
+ {ok, {Addr, 0, AncData, Data}};
+ {{IP, Port}, Data} ->
+ {ok, {IP, Port, AncData, Data}}
+ end;
{inet_async, S, Ref, {ok, [F | AddrData]}} ->
+ %% Without ancillary data
case get_addr(F, AddrData) of
{{Family, _} = Addr, Data} when is_atom(Family) ->
{ok, {Addr, 0, Data}};
@@ -1256,6 +1269,11 @@ enc_opt(recbuf) -> ?INET_OPT_RCVBUF;
enc_opt(priority) -> ?INET_OPT_PRIORITY;
enc_opt(tos) -> ?INET_OPT_TOS;
enc_opt(tclass) -> ?INET_OPT_TCLASS;
+enc_opt(recvtos) -> ?INET_OPT_RECVTOS;
+enc_opt(recvtclass) -> ?INET_OPT_RECVTCLASS;
+enc_opt(pktoptions) -> ?INET_OPT_PKTOPTIONS;
+enc_opt(ttl) -> ?INET_OPT_TTL;
+enc_opt(recvttl) -> ?INET_OPT_RECVTTL;
enc_opt(nodelay) -> ?TCP_OPT_NODELAY;
enc_opt(multicast_if) -> ?UDP_OPT_MULTICAST_IF;
enc_opt(multicast_ttl) -> ?UDP_OPT_MULTICAST_TTL;
@@ -1318,6 +1336,11 @@ dec_opt(?INET_OPT_PRIORITY) -> priority;
dec_opt(?INET_OPT_TOS) -> tos;
dec_opt(?INET_OPT_TCLASS) -> tclass;
dec_opt(?TCP_OPT_NODELAY) -> nodelay;
+dec_opt(?INET_OPT_RECVTOS) -> recvtos;
+dec_opt(?INET_OPT_RECVTCLASS) -> recvtclass;
+dec_opt(?INET_OPT_PKTOPTIONS) -> pktoptions;
+dec_opt(?INET_OPT_TTL) -> ttl;
+dec_opt(?INET_OPT_RECVTTL) -> recvttl;
dec_opt(?UDP_OPT_MULTICAST_IF) -> multicast_if;
dec_opt(?UDP_OPT_MULTICAST_TTL) -> multicast_ttl;
dec_opt(?UDP_OPT_MULTICAST_LOOP) -> multicast_loop;
@@ -1393,6 +1416,11 @@ type_opt_1(recbuf) -> int;
type_opt_1(priority) -> int;
type_opt_1(tos) -> int;
type_opt_1(tclass) -> int;
+type_opt_1(recvtos) -> bool;
+type_opt_1(recvtclass) -> bool;
+type_opt_1(pktoptions) -> opts;
+type_opt_1(ttl) -> int;
+type_opt_1(recvttl) -> bool;
type_opt_1(nodelay) -> bool;
type_opt_1(ipv6_v6only) -> bool;
%% multicast
@@ -1899,6 +1927,11 @@ dec_value(binary,[L0,L1,L2,L3|List]) ->
Len = ?i32(L0,L1,L2,L3),
{X,T}=split(Len,List),
{list_to_binary(X),T};
+dec_value(opts, [L0,L1,L2,L3|List]) ->
+ Len = ?u32(L0,L1,L2,L3),
+ {X,T} = split(Len, List),
+ Opts = dec_opt_val(X),
+ {Opts,T};
dec_value(Types, List) when is_tuple(Types) ->
{L,T} = dec_value_tuple(Types, List, 1, []),
{list_to_tuple(L),T};
diff --git a/erts/vsn.mk b/erts/vsn.mk
index feb51e42d2..e28716c37f 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
#
-VSN = 10.0.6
+VSN = 10.0.8
# Port number 4365 in 4.2
# Port number 4366 in 4.3