-rw-r--r-- Makefile.in | 13
-rw-r--r-- configure.in | 4
-rw-r--r-- erts/doc/src/Makefile | 11
-rw-r--r-- erts/doc/src/atomics.xml | 183
-rw-r--r-- erts/doc/src/counters.xml | 142
-rw-r--r-- erts/doc/src/notes.xml | 89
-rw-r--r-- erts/doc/src/persistent_term.xml | 290
-rw-r--r-- erts/doc/src/ref_man.xml | 3
-rw-r--r-- erts/doc/src/specs.xml | 3
-rw-r--r-- erts/emulator/Makefile.in | 35
-rw-r--r-- erts/emulator/beam/atom.c | 2
-rw-r--r-- erts/emulator/beam/atom.names | 4
-rw-r--r-- erts/emulator/beam/beam_bif_load.c | 59
-rw-r--r-- erts/emulator/beam/bif.c | 4
-rw-r--r-- erts/emulator/beam/bif.tab | 26
-rw-r--r-- erts/emulator/beam/copy.c | 10
-rw-r--r-- erts/emulator/beam/erl_alloc.types | 6
-rw-r--r-- erts/emulator/beam/erl_bif_atomics.c | 256
-rw-r--r-- erts/emulator/beam/erl_bif_counters.c | 219
-rw-r--r-- erts/emulator/beam/erl_bif_lists.c | 830
-rw-r--r-- erts/emulator/beam/erl_bif_persistent.c | 983
-rw-r--r-- erts/emulator/beam/erl_bif_unique.h | 6
-rw-r--r-- erts/emulator/beam/erl_dirty_bif.tab | 2
-rw-r--r-- erts/emulator/beam/erl_gc.c | 19
-rw-r--r-- erts/emulator/beam/erl_init.c | 1
-rw-r--r-- erts/emulator/beam/erl_lock_check.c | 2
-rw-r--r-- erts/emulator/beam/erl_process_dump.c | 40
-rw-r--r-- erts/emulator/beam/erl_utils.h | 61
-rw-r--r-- erts/emulator/beam/external.c | 3
-rw-r--r-- erts/emulator/beam/global.h | 11
-rw-r--r-- erts/emulator/beam/sys.h | 3
-rw-r--r-- erts/emulator/beam/utils.c | 64
-rw-r--r-- erts/emulator/sys/unix/sys_uds.c | 5
-rw-r--r-- erts/emulator/test/Makefile | 3
-rw-r--r-- erts/emulator/test/atomics_SUITE.erl | 147
-rw-r--r-- erts/emulator/test/code_SUITE.erl | 37
-rw-r--r-- erts/emulator/test/counters_SUITE.erl | 112
-rw-r--r-- erts/emulator/test/persistent_term_SUITE.erl | 614
-rw-r--r-- erts/etc/unix/run_erl.c | 6
-rw-r--r-- erts/preloaded/ebin/atomics.beam | bin 0 -> 3300 bytes
-rw-r--r-- erts/preloaded/ebin/counters.beam | bin 0 -> 2808 bytes
-rw-r--r-- erts/preloaded/ebin/erts_internal.beam | bin 16676 -> 17508 bytes
-rw-r--r-- erts/preloaded/ebin/init.beam | bin 51428 -> 51500 bytes
-rw-r--r-- erts/preloaded/ebin/persistent_term.beam | bin 0 -> 1652 bytes
-rw-r--r-- erts/preloaded/ebin/prim_inet.beam | bin 80864 -> 82260 bytes
-rw-r--r-- erts/preloaded/src/Makefile | 5
-rw-r--r-- erts/preloaded/src/atomics.erl | 119
-rw-r--r-- erts/preloaded/src/counters.erl | 90
-rw-r--r-- erts/preloaded/src/erts.app.src | 4
-rw-r--r-- erts/preloaded/src/erts_internal.erl | 31
-rw-r--r-- erts/preloaded/src/init.erl | 1
-rw-r--r-- erts/preloaded/src/persistent_term.erl | 55
-rw-r--r-- erts/preloaded/src/prim_inet.erl | 143
-rw-r--r-- lib/compiler/src/beam_jump.erl | 15
-rw-r--r-- lib/compiler/test/beam_jump_SUITE.erl | 19
-rw-r--r-- lib/crypto/src/crypto.erl | 2
-rw-r--r-- lib/crypto/test/engine_SUITE.erl | 15
-rw-r--r-- lib/eldap/doc/src/notes.xml | 33
-rw-r--r-- lib/hipe/main/hipe.erl | 2
-rw-r--r-- lib/kernel/doc/src/logger.xml | 92
-rw-r--r-- lib/kernel/doc/src/logger_chapter.xml | 30
-rw-r--r-- lib/kernel/doc/src/logger_disk_log_h.xml | 10
-rw-r--r-- lib/kernel/doc/src/logger_std_h.xml | 4
-rw-r--r-- lib/kernel/src/inet_tcp_dist.erl | 2
-rw-r--r-- lib/kernel/src/logger.erl | 35
-rw-r--r-- lib/kernel/src/logger_disk_log_h.erl | 81
-rw-r--r-- lib/kernel/src/logger_h_common.erl | 5
-rw-r--r-- lib/kernel/src/logger_server.erl | 146
-rw-r--r-- lib/kernel/src/logger_std_h.erl | 67
-rw-r--r-- lib/kernel/test/gen_tcp_misc_SUITE.erl | 146
-rw-r--r-- lib/kernel/test/logger_SUITE.erl | 12
-rw-r--r-- lib/kernel/test/logger_disk_log_h_SUITE.erl | 179
-rw-r--r-- lib/kernel/test/logger_std_h_SUITE.erl | 175
-rw-r--r-- lib/observer/src/Makefile | 1
-rw-r--r-- lib/observer/src/cdv_html_wx.erl | 26
-rw-r--r-- lib/observer/src/cdv_persistent_cb.erl | 32
-rw-r--r-- lib/observer/src/cdv_wx.erl | 16
-rw-r--r-- lib/observer/src/crashdump_viewer.erl | 237
-rw-r--r-- lib/observer/src/observer.app.src | 1
-rw-r--r-- lib/observer/src/observer_html_lib.erl | 3
-rw-r--r-- lib/observer/test/crashdump_helper.erl | 49
-rw-r--r-- lib/observer/test/crashdump_viewer_SUITE.erl | 32
-rw-r--r-- lib/os_mon/c_src/cpu_sup.c | 50
-rw-r--r-- lib/os_mon/src/cpu_sup.erl | 26
-rw-r--r-- lib/public_key/asn1/OTP-PKIX.asn1 | 24
-rw-r--r-- lib/public_key/src/public_key.erl | 4
-rw-r--r-- lib/public_key/test/public_key_SUITE.erl | 11
-rw-r--r-- lib/sasl/src/sasl.app.src | 2
-rw-r--r-- lib/sasl/src/systools_make.erl | 4
-rw-r--r-- lib/ssh/doc/src/Makefile | 3
-rw-r--r-- lib/ssh/doc/src/notes.xml | 17
-rw-r--r-- lib/ssh/doc/src/ref_man.xml | 1
-rw-r--r-- lib/ssh/doc/src/specs.xml | 1
-rw-r--r-- lib/ssh/doc/src/ssh.xml | 113
-rw-r--r-- lib/ssh/doc/src/ssh_app.xml | 7
-rw-r--r-- lib/ssh/doc/src/ssh_file.xml | 275
-rw-r--r-- lib/ssh/doc/src/ssh_sftp.xml | 1
-rw-r--r-- lib/ssh/doc/src/terminology.xml | 185
-rw-r--r-- lib/ssh/doc/src/usersguide.xml | 1
-rw-r--r-- lib/ssh/doc/src/using_ssh.xml | 11
-rw-r--r-- lib/ssh/src/ssh.hrl | 24
-rw-r--r-- lib/ssh/src/ssh_connection_handler.erl | 4
-rw-r--r-- lib/ssh/src/ssh_file.erl | 17
-rw-r--r-- lib/ssh/src/ssh_options.erl | 12
-rw-r--r-- lib/ssh/src/ssh_transport.erl | 3
-rw-r--r-- lib/ssh/test/.gitignore | 5
-rw-r--r-- lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl | 20
-rw-r--r-- lib/ssh/test/property_test/ssh_eqc_client_server.erl | 230
-rw-r--r-- lib/ssh/test/ssh_compat_SUITE.erl | 13
-rwxr-xr-x lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all | 6
-rw-r--r-- lib/ssh/test/ssh_property_test_SUITE.erl | 7
-rw-r--r-- lib/ssl/doc/src/notes.xml | 34
-rw-r--r-- lib/ssl/src/dtls_record.erl | 22
-rw-r--r-- lib/ssl/src/inet_tls_dist.erl | 2
-rw-r--r-- lib/ssl/src/ssl_cipher.erl | 90
-rw-r--r-- lib/ssl/src/ssl_cipher.hrl | 3
-rw-r--r-- lib/ssl/src/ssl_connection.erl | 4
-rw-r--r-- lib/ssl/src/ssl_handshake.erl | 7
-rw-r--r-- lib/ssl/src/ssl_pem_cache.erl | 23
-rw-r--r-- lib/ssl/src/ssl_record.erl | 79
-rw-r--r-- lib/ssl/src/tls_connection.erl | 3
-rw-r--r-- lib/ssl/src/tls_record.erl | 24
-rw-r--r-- lib/ssl/src/tls_sender.erl | 2
-rw-r--r-- lib/ssl/test/ssl_ECC_SUITE.erl | 17
-rw-r--r-- lib/ssl/test/ssl_basic_SUITE.erl | 48
-rw-r--r-- lib/ssl/test/ssl_bench_SUITE.erl | 1
-rw-r--r-- lib/ssl/test/ssl_bench_test_lib.erl | 4
-rw-r--r-- lib/ssl/test/ssl_engine_SUITE.erl | 15
-rw-r--r-- lib/ssl/test/ssl_test_lib.erl | 87
-rw-r--r-- lib/stdlib/doc/src/beam_lib.xml | 4
-rw-r--r-- lib/stdlib/doc/src/gen_event.xml | 2
-rw-r--r-- lib/stdlib/doc/src/lists.xml | 8
-rw-r--r-- lib/stdlib/doc/src/notes.xml | 31
-rw-r--r-- lib/stdlib/src/beam_lib.erl | 2
-rw-r--r-- lib/stdlib/test/gen_fsm_SUITE.erl | 2
-rw-r--r-- lib/stdlib/test/lists_SUITE.erl | 49
-rw-r--r-- lib/stdlib/test/sys_SUITE.erl | 17
-rwxr-xr-x make/fixup_development_runtime_dependencies | 111
-rw-r--r-- make/otp_patch_solve_forward_merge_version | 1
-rw-r--r-- make/otp_version_tickets | 1
-rw-r--r-- otp_versions.table | 8
-rw-r--r-- system/doc/efficiency_guide/commoncaveats.xml | 48
-rw-r--r-- system/doc/efficiency_guide/profiling.xml | 2
-rw-r--r-- system/doc/efficiency_guide/retired_myths.xml | 14
144 files changed, 7128 insertions, 937 deletions
diff --git a/Makefile.in b/Makefile.in
index d880bfefa2..9f053963c4 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -161,6 +161,8 @@ ERLANG_LIBDIR = $(DESTDIR)$(ERLANG_INST_LIBDIR)
# Must be GNU make!
MAKE = @MAKE_PROG@
+PERL = @PERL@
+
NATIVE_LIBS_ENABLED = @NATIVE_LIBS_ENABLED@
ifeq ($(NATIVE_LIBS_ENABLED),yes)
@@ -327,16 +329,16 @@ ifneq ($(CROSS_COMPILING),yes)
# Not cross compiling
ifeq ($(BOOTSTRAP_ONLY),yes)
-all: bootstrap
+all: bootstrap check_dev_rt_dep
else
# The normal case; not cross compiling, and not bootstrap only build.
-all: bootstrap libs local_setup
+all: bootstrap libs local_setup check_dev_rt_dep
endif
else
# Cross compiling
-all: cross_check_erl depend emulator libs start_scripts
+all: cross_check_erl depend emulator libs start_scripts check_dev_rt_dep
endif
@@ -356,7 +358,10 @@ erlang_inst_libdir_configured:
bootstrap: depend all_bootstraps
-
+check_dev_rt_dep:
+ @if `grep DEVELOPMENT "$(ERL_TOP)/make/otp_version_tickets" 1>/dev/null 2>&1`; then \
+ LANG=C "$(PERL)" "$(ERL_TOP)/make/fixup_development_runtime_dependencies" "$(ERL_TOP)"; \
+ fi
ifeq ($(OTP_STRICT_INSTALL),yes)
diff --git a/configure.in b/configure.in
index 559049aca1..2a42477723 100644
--- a/configure.in
+++ b/configure.in
@@ -185,6 +185,10 @@ fi
AC_PROG_LN_S
AC_PROG_RANLIB
+LM_PROG_PERL5
+if test "$ac_cv_path_PERL" = false; then
+ AC_MSG_ERROR([Perl version 5 is required!])
+fi
#
# Get erts version from erts/vsn.mk
diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile
index 21aa3db864..40f74b78ff 100644
--- a/erts/doc/src/Makefile
+++ b/erts/doc/src/Makefile
@@ -52,18 +52,17 @@ XML_REF3_EFILES = \
erlang.xml \
erl_tracer.xml \
init.xml \
+ persistent_term.xml \
+ atomics.xml \
+ counters.xml \
zlib.xml
XML_REF3_FILES = \
+ $(XML_REF3_EFILES) \
driver_entry.xml \
erl_nif.xml \
- erl_tracer.xml \
erl_driver.xml \
- erl_prim_loader.xml \
- erlang.xml \
- erts_alloc.xml \
- init.xml \
- zlib.xml
+ erts_alloc.xml
XML_PART_FILES = \
part.xml
diff --git a/erts/doc/src/atomics.xml b/erts/doc/src/atomics.xml
new file mode 100644
index 0000000000..3fca92fb97
--- /dev/null
+++ b/erts/doc/src/atomics.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ </legalnotice>
+
+ <title>atomics</title>
+ </header>
+ <module>atomics</module>
+ <modulesummary>Atomic Functions</modulesummary>
+ <description>
+    <p>This module provides a set of functions for performing atomic operations on
+    mutable atomic variables. The implementation uses only
+    atomic hardware instructions, without any software-level locking, which makes
+    it very efficient for concurrent access. The atomics are organized into
+    arrays with the following semantics:</p>
+ <list type="bulleted">
+ <item>
+ <p>Atomics are 64 bit integers.</p>
+ </item>
+ <item>
+ <p>Atomics can be represented as either signed or unsigned.</p>
+ </item>
+ <item>
+ <p>Atomics wrap around at overflow and underflow operations.</p>
+ </item>
+ <item>
+ <p>All operations guarantee atomicity. No intermediate results can be
+ seen. The result of one mutation can only be the input to one
+ following mutation.</p>
+ </item>
+ <item>
+ <p>All atomic operations are mutually ordered. If atomic B is updated
+ <em>after</em> atomic A, then that is how it will appear to any
+ concurrent readers. No one can read the new value of B and then read the
+ old value of A.</p>
+ </item>
+ <item>
+ <p>Indexes into atomic arrays are one-based. An atomic array of
+      arity N contains N atomics with indexes from 1 to N.</p>
+ </item>
+ </list>
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="atomics_ref"/>
+ <desc><p>Identifies an atomic array returned from
+ <seealso marker="#new/2"><c>new/2</c></seealso>.</p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name name="new" arity="2"/>
+ <fsummary>Create atomic array</fsummary>
+ <desc>
+ <p>Create a new atomic array of <c><anno>Arity</anno></c> atomics.</p>
+ <p>Argument <c><anno>Opts</anno></c> is a list of the following possible
+ options:</p>
+ <taglist>
+ <tag><c>{signed, boolean()}</c></tag>
+        <item><p>Indicates whether the elements of the array will be treated
+        as signed or unsigned integers. Default is <c>true</c> (signed).</p>
+        <p>The integer interval for signed atomics is from <c>-(1 bsl 63)</c>
+ to <c>(1 bsl 63)-1</c> and for unsigned atomics from <c>0</c> to <c>(1
+ bsl 64)-1</c>.</p>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
+ <name name="put" arity="3"/>
+ <fsummary>Set atomic value</fsummary>
+ <desc>
+ <p>Set atomic to <c><anno>Value</anno></c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get" arity="2"/>
+ <fsummary>Read atomic value</fsummary>
+ <desc>
+ <p>Read atomic value.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add" arity="3"/>
+ <fsummary>Add to atomic</fsummary>
+ <desc>
+ <p>Add <c><anno>Incr</anno></c> to atomic.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_get" arity="3"/>
+ <fsummary>Atomic add and get</fsummary>
+ <desc>
+ <p>Atomic addition and return of the result.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="sub" arity="3"/>
+ <fsummary>Subtract from atomic</fsummary>
+ <desc>
+ <p>Subtract <c><anno>Decr</anno></c> from atomic.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="sub_get" arity="3"/>
+ <fsummary>Atomic sub and get</fsummary>
+ <desc>
+ <p>Atomic subtraction and return of the result.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="exchange" arity="3"/>
+ <fsummary>Atomic exchange.</fsummary>
+ <desc>
+ <p>Atomically replaces the value of the atomic with
+ <c><anno>Desired</anno></c> and returns the value it held
+ previously.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="compare_exchange" arity="4"/>
+ <fsummary>Atomic compare and exchange.</fsummary>
+ <desc>
+ <p>Atomically compares the atomic with <c><anno>Expected</anno></c>,
+      and if they are equal, sets the atomic to <c><anno>Desired</anno></c>.
+ Returns <c>ok</c> if <c><anno>Desired</anno></c> was written. Returns
+ the actual atomic value if not equal to <c><anno>Expected</anno></c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="info" arity="1"/>
+ <fsummary>Get information about atomic array.</fsummary>
+ <desc>
+ <p>Return information about an atomic array in a map. The map
+ has the following keys:</p>
+ <taglist>
+ <tag><c>size</c></tag>
+ <item><p>The number of atomics in the array.</p></item>
+ <tag><c>max</c></tag>
+ <item><p>The highest possible value an atomic in this array can
+ hold.</p></item>
+ <tag><c>min</c></tag>
+ <item><p>The lowest possible value an atomic in this array can
+ hold.</p></item>
+ <tag><c>memory</c></tag>
+ <item><p>Approximate memory consumption for the array in
+ bytes.</p></item>
+ </taglist>
+ </desc>
+ </func>
+
+ </funcs>
+</erlref>
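
As a usage sketch of the atomics API documented above (function names, arities, and
option names are taken from atomics.xml and bif.tab in this diff; the concrete values
are only illustrative):

    %% Create an array of three 64-bit atomics (signed is the default).
    Ref = atomics:new(3, [{signed, true}]),
    ok  = atomics:put(Ref, 1, 42),                    %% set index 1 to 42
    42  = atomics:get(Ref, 1),
    45  = atomics:add_get(Ref, 1, 3),                 %% add 3 and read back the result
    ok  = atomics:compare_exchange(Ref, 1, 45, 100),  %% succeeds because the value is 45
    100 = atomics:get(Ref, 1),
    #{size := 3, min := _, max := _, memory := _} = atomics:info(Ref).
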
diff --git a/erts/doc/src/counters.xml b/erts/doc/src/counters.xml
new file mode 100644
index 0000000000..85eedfdadc
--- /dev/null
+++ b/erts/doc/src/counters.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ </legalnotice>
+
+ <title>counters</title>
+ </header>
+ <module>counters</module>
+ <modulesummary>Counter Functions</modulesummary>
+ <description>
+    <p>This module provides a set of functions for performing operations on
+    shared mutable counter variables. The implementation does not use any
+    software-level locking, which makes it very efficient for concurrent
+    access. The counters are organized into arrays with the following
+    semantics:</p>
+ <list type="bulleted">
+ <item>
+ <p>Counters are 64 bit signed integers.</p>
+ </item>
+ <item>
+ <p>Counters wrap around at overflow and underflow operations.</p>
+ </item>
+ <item><p>Counters are initialized to zero and can then only be written to
+ by adding or subtracting.</p>
+ </item>
+ <item>
+ <p>Write operations guarantee atomicity. No intermediate results can be
+ seen from a single write operation.</p>
+ </item>
+ <item>
+ <p>Two types of counter arrays can be created with options <c>atomics</c> or
+      <c>write_concurrency</c>. The <c>atomics</c> counters have good all-round
+      performance with consistent semantics, while
+      <c>write_concurrency</c> counters offer even better concurrent
+ write performance at the expense of some potential read
+ inconsistencies. See <seealso marker="#new/2"><c>new/2</c></seealso>.</p>
+ </item>
+ <item>
+ <p>Indexes into counter arrays are one-based. A counter array of
+      size N contains N counters with indexes from 1 to N.</p>
+ </item>
+ </list>
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="counters_ref"/>
+ <desc><p>Identifies a counter array returned from
+ <seealso marker="#new/2"><c>new/2</c></seealso>.</p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name name="new" arity="2"/>
+ <fsummary>Create counter array</fsummary>
+ <desc>
+ <p>Create a new counter array of <c><anno>Size</anno></c> counters.</p>
+ <p>Argument <c><anno>Opts</anno></c> is a list of the following possible
+ options:</p>
+ <taglist>
+ <tag><c>atomics</c> (Default)</tag>
+ <item><p>Counters will be sequentially consistent. If write
+        operation A is done sequentially before write operation B, then a concurrent reader
+ may see none of them, only A, or both A and B. It cannot see only B.</p>
+ </item>
+ <tag><c>write_concurrency</c></tag>
+ <item><p>This is an optimization to achieve very efficient concurrent
+ write operations at the expense of potential read inconsistency and memory
+ consumption per counter.</p>
+ <p>Read operations may see sequentially inconsistent results with
+ regard to concurrent write operations. Even if write operation A is done
+        sequentially before write operation B, a concurrent reader may see any
+ combination of A and B, including only B. A read operation is only
+ guaranteed to see all writes done sequentially before the read. No writes
+ are ever lost, but will eventually all be seen.</p>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get" arity="2"/>
+ <fsummary>Read counter value</fsummary>
+ <desc>
+ <p>Read counter value.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add" arity="3"/>
+ <fsummary>Add to counter</fsummary>
+ <desc>
+ <p>Add <c><anno>Incr</anno></c> to counter.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="sub" arity="3"/>
+ <fsummary>Subtract from counter</fsummary>
+ <desc>
+ <p>Subtract <c><anno>Decr</anno></c> from counter.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="info" arity="1"/>
+ <fsummary>Get information about counter array.</fsummary>
+ <desc>
+ <p>Return information about a counter array in a map. The map
+ has the following keys (at least):</p>
+ <taglist>
+ <tag><c>size</c></tag>
+ <item><p>The number of counters in the array.</p></item>
+ <tag><c>memory</c></tag>
+ <item><p>Approximate memory consumption for the array in
+ bytes.</p></item>
+ </taglist>
+ </desc>
+ </func>
+
+ </funcs>
+</erlref>
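
As a usage sketch of the counters API documented above (option and function names
follow counters.xml in this diff; the values are only illustrative, and the read
inconsistencies possible with write_concurrency do not show up in a single process):

    CRef = counters:new(2, [write_concurrency]),
    ok = counters:add(CRef, 1, 5),
    ok = counters:sub(CRef, 1, 2),
    3 = counters:get(CRef, 1),    %% a reader always sees all writes it made itself
    0 = counters:get(CRef, 2),    %% counters are initialized to zero
    #{size := 2, memory := _} = counters:info(CRef).
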
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 102de90c56..1b165e885c 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -1355,6 +1355,63 @@
</section>
+<section><title>Erts 9.3.3.6</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+          <p>List subtraction (the <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 9.3.3.5</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ ERTS internal trees of monitor structures could get into
+ an inconsistent state. This could cause <c>'DOWN'</c>
+ messages not to be delivered when they should, as well as
+ delivery of <c>'DOWN'</c> messages that should not be
+ delivered.</p>
+ <p>
+ This bug was introduced in ERTS version 9.0 (OTP 20.0)
+ and was fixed in ERTS version 10.0 (OTP 21.0) due to a
+	    rewrite of the monitor code. That is, this bug only exists
+ in the OTP 20 release.</p>
+ <p>
+ Own Id: OTP-15399 Aux Id: ERL-751, ERIERL-262, OTP-14205 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 9.3.3.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+	    Fixed a bug in <c>ets:select_replace</c> that, when called with a
+	    fully bound key, could cause a following call to
+	    <c>ets:next</c> or <c>ets:prev</c> to crash the emulator
+	    or return an invalid result.</p>
+ <p>
+ Own Id: OTP-15346</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 9.3.3.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -4757,6 +4814,37 @@
</section>
+<section><title>Erts 7.3.1.6</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+          <p>List subtraction (the <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 7.3.1.5</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+	    Fixed a small memory leak that could occur when sending to
+ a terminating port.</p>
+ <p>
+ Own Id: OTP-14609 Aux Id: ERIERL-238 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 7.3.1.4</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
@@ -19172,4 +19260,3 @@
</section>
</section>
</chapter>
-
diff --git a/erts/doc/src/persistent_term.xml b/erts/doc/src/persistent_term.xml
new file mode 100644
index 0000000000..d2a138d65f
--- /dev/null
+++ b/erts/doc/src/persistent_term.xml
@@ -0,0 +1,290 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>persistent_term</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>persistent_term.xml</file>
+ </header>
+ <module>persistent_term</module>
+ <modulesummary>Persistent terms.</modulesummary>
+ <description>
+ <p>This module is similar to <seealso
+    marker="stdlib:ets"><c>ets</c></seealso> in that it provides
+    storage for Erlang terms that can be accessed in constant time,
+ but with the difference that <c>persistent_term</c> has been
+ highly optimized for reading terms at the expense of writing and
+ updating terms. When a persistent term is updated or deleted, a
+ global garbage collection pass is run to scan all processes for
+ the deleted term, and to copy it into each process that still uses
+ it. Therefore, <c>persistent_term</c> is suitable for storing
+ Erlang terms that are frequently accessed but never or
+ infrequently updated.</p>
+
+    <warning><p>Persistent terms are an advanced feature and not a
+    general replacement for ETS tables. Before using persistent terms,
+    make sure to fully understand the consequences for system
+    performance when updating or deleting persistent terms.</p></warning>
+
+ <p>Term lookup (using <seealso
+    marker="#get/1"><c>get/1</c></seealso>) is done in constant time
+ and without taking any locks, and the term is <strong>not</strong>
+ copied to the heap (as is the case with terms stored in ETS
+ tables).</p>
+
+ <p>Storing or updating a term (using <seealso
+ marker="#put/2"><c>put/2</c></seealso>) is proportional to the
+ number of already created persistent terms because the hash table
+ holding the keys will be copied. In addition, the term itself will
+ be copied.</p>
+
+ <p>When a (complex) term is deleted (using <seealso
+ marker="#erase/1"><c>erase/1</c></seealso>) or replaced by another
+ (using <seealso marker="#put/2"><c>put/2</c></seealso>), a global
+ garbage collection is initiated. It works like this:</p>
+
+ <list>
+ <item><p>All processes in the system will be scheduled to run a
+ scan of their heaps for the term that has been deleted. While
+    such a scan is relatively light-weight, if there are many
+    processes, the system can become less responsive until all
+    processes have scanned their heaps.</p></item>
+
+ <item><p>If the deleted term (or any part of it) is still used
+ by a process, that process will do a major (fullsweep) garbage
+ collection and copy the term into the process. However, at most
+ two processes at a time will be scheduled to do that kind of
+ garbage collection.</p></item>
+ </list>
+
+ <p>Deletion of atoms and other terms that fit in one machine word
+ is specially optimized to avoid doing a global GC. It is still not
+ recommended to update persistent terms with such values too
+ frequently because the hash table holding the keys is copied every
+ time a persistent term is updated.</p>
+
+    <p>Some examples of suitable uses for persistent terms are:</p>
+
+ <list>
+ <item><p>Storing of configuration data that must be easily
+ accessible by all processes.</p></item>
+
+ <item><p>Storing of references for NIF resources.</p></item>
+
+ <item><p>Storing of references for efficient counters.</p></item>
+
+ <item><p>Storing an atom to indicate a logging level or whether debugging
+ is turned on.</p></item>
+ </list>
+
+ </description>
+
+ <section>
+ <title>Storing Huge Persistent Terms</title>
+ <p>The current implementation of persistent terms uses the literal
+ <seealso marker="erts_alloc">allocator</seealso> also used for
+ literals (constant terms) in BEAM code. By default, 1 GB of
+ virtual address space is reserved for literals in BEAM code and
+ persistent terms. The amount of virtual address space reserved for
+ literals can be changed by using the <seealso
+ marker="erts_alloc#MIscs"><c>+MIscs option</c></seealso> when
+ starting the emulator.</p>
+
+    <p>Here is an example of how the reserved virtual address space for literals
+ can be raised to 2 GB (2048 MB):</p>
+
+ <pre>
+ erl +MIscs 2048</pre>
+ </section>
+
+ <section>
+ <title>Warning For Many Persistent Terms</title>
+ <p>The runtime system will send a warning report to the
+ error logger if more than 20000 persistent terms have been
+ created. It will look like this:</p>
+
+<pre>
+More than 20000 persistent terms have been created.
+It is recommended to avoid creating an excessive number of
+persistent terms, as creation and deletion of persistent terms
+will be slower as the number of persistent terms increases.</pre>
+ </section>
+
+ <section>
+ <title>Best Practices for Using Persistent Terms</title>
+
+ <p>It is recommended to use keys like <c>?MODULE</c> or
+ <c>{?MODULE,SubKey}</c> to avoid name collisions.</p>
+
+ <p>Prefer creating a few large persistent terms to creating many
+ small persistent terms. The execution time for storing a
+ persistent term is proportional to the number of already existing
+ terms.</p>
+
+    <p>Updating a persistent term with the same value as it already
+    has is specially optimized to return quickly; thus, there is
+    no need to compare the old and new values yourself in order to avoid
+    calling <seealso marker="#put/2"><c>put/2</c></seealso> when the values
+    are equal.</p>
+
+ <p>When atoms or other terms that fit in one machine word are
+ deleted, no global GC is needed. Therefore, persistent terms that
+ have atoms as their values can be updated more frequently, but
+ note that updating such persistent terms is still much more
+ expensive than reading them.</p>
+
+ <p>Updating or deleting a persistent term will trigger a global GC
+ if the term does not fit in one machine word. Processes will be
+ scheduled as usual, but all processes will be made runnable at
+    once, which will make the system less responsive until all processes
+ have run and scanned their heaps for the deleted terms. One way to
+ minimize the effects on responsiveness could be to minimize the
+ number of processes on the node before updating or deleting a
+ persistent term. It would also be wise to avoid updating terms
+ when the system is at peak load.</p>
+
+ <p>Avoid storing a retrieved persistent term in a process if that
+ persistent term could be deleted or updated in the future. If a
+ process holds a reference to a persistent term when the term is
+ deleted, the process will be garbage collected and the term copied
+    to the process.</p>
+
+ <p>Avoid updating or deleting more than one persistent term at a
+ time. Each deleted term will trigger its own global GC. That
+ means that deleting N terms will make the system less responsive N
+ times longer than deleting a single persistent term. Therefore,
+ terms that are to be updated at the same time should be collected
+ into a larger term, for example, a map or a tuple.</p>
+ </section>
+
+ <section>
+ <title>Example</title>
+
+ <p>The following example shows how lock contention for ETS tables
+ can be minimized by having one ETS table for each scheduler. The
+ table identifiers for the ETS tables are stored as a single
+ persistent term:</p>
+
+<pre>
+ %% There is one ETS table for each scheduler.
+ Sid = erlang:system_info(scheduler_id),
+ Tid = element(Sid, persistent_term:get(?MODULE)),
+ ets:update_counter(Tid, Key, 1).</pre>
+
+ </section>
+
+ <datatypes>
+ <datatype>
+ <name name="key"/>
+ <desc>
+ <p>Any Erlang term.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="value"/>
+ <desc>
+ <p>Any Erlang term.</p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name name="erase" arity="1"/>
+ <fsummary>Erase the name for a persistent term.</fsummary>
+ <desc>
+ <p>Erase the name for the persistent term with key
+ <c><anno>Key</anno></c>. The return value will be <c>true</c>
+ if there was a persistent term with the key
+ <c><anno>Key</anno></c>, and <c>false</c> if there was no
+ persistent term associated with the key.</p>
+ <p>If there existed a previous persistent term associated with
+ key <c><anno>Key</anno></c>, a global GC has been initiated
+ when <c>erase/1</c> returns. See <seealso
+ marker="#description">Description</seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get" arity="0"/>
+ <fsummary>Get all persistent terms.</fsummary>
+ <desc>
+ <p>Retrieve the keys and values for all persistent terms.
+ The keys will be copied to the heap for the process calling
+ <c>get/0</c>, but the values will not.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get" arity="1"/>
+ <fsummary>Get the value for a persistent term.</fsummary>
+ <desc>
+ <p>Retrieve the value for the persistent term associated with
+ the key <c><anno>Key</anno></c>. The lookup will be made in
+ constant time and the value will not be copied to the heap
+ of the calling process.</p>
+ <p>This function fails with a <c>badarg</c> exception if no
+ term has been stored with the key
+ <c><anno>Key</anno></c>.</p>
+ <p>If the calling process holds on to the value of the
+ persistent term and the persistent term is deleted in the future,
+ the term will be copied to the process.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="info" arity="0"/>
+ <fsummary>Get information about persistent terms.</fsummary>
+ <desc>
+ <p>Return information about persistent terms in a map. The map
+ has the following keys:</p>
+ <taglist>
+ <tag><c>count</c></tag>
+ <item><p>The number of persistent terms.</p></item>
+ <tag><c>memory</c></tag>
+ <item><p>The total amount of memory (measured in bytes)
+ used by all persistent terms.</p></item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
+ <name name="put" arity="2"/>
+ <fsummary>Store a term.</fsummary>
+ <desc>
+ <p>Store the value <c><anno>Value</anno></c> as a persistent term and
+ associate it with the key <c><anno>Key</anno></c>.</p>
+ <p>If the value <c><anno>Value</anno></c> is equal to the value
+ previously stored for the key, <c>put/2</c> will do nothing and return
+ quickly.</p>
+ <p>If there existed a previous persistent term associated with
+ key <c><anno>Key</anno></c>, a global GC has been initiated
+ when <c>put/2</c> returns. See <seealso
+ marker="#description">Description</seealso>.</p>
+ </desc>
+ </func>
+ </funcs>
+</erlref>
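
As a usage sketch of the persistent_term API documented above (the {?MODULE, ...} key
follows the convention recommended under Best Practices; the configuration value is
only illustrative):

    %% Store the configuration once, typically at application start.
    ok = persistent_term:put({?MODULE, config}, #{log_level => info}),

    %% Constant-time lookup; the value is not copied to the caller's heap.
    #{log_level := _Level} = persistent_term:get({?MODULE, config}),

    %% Erasing triggers a global GC scan of all processes, so do it sparingly.
    true = persistent_term:erase({?MODULE, config}),
    #{count := _, memory := _} = persistent_term:info().
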
diff --git a/erts/doc/src/ref_man.xml b/erts/doc/src/ref_man.xml
index 0617463a7b..a78aaa449e 100644
--- a/erts/doc/src/ref_man.xml
+++ b/erts/doc/src/ref_man.xml
@@ -34,6 +34,7 @@
<xi:include href="erl_prim_loader.xml"/>
<xi:include href="erlang.xml"/>
<xi:include href="init.xml"/>
+ <xi:include href="persistent_term.xml"/>
<xi:include href="zlib.xml"/>
<xi:include href="epmd.xml"/>
<xi:include href="erl.xml"/>
@@ -49,5 +50,7 @@
<xi:include href="erts_alloc.xml"/>
<xi:include href="erl_nif.xml"/>
<xi:include href="erl_tracer.xml"/>
+ <xi:include href="atomics.xml"/>
+ <xi:include href="counters.xml"/>
</application>
diff --git a/erts/doc/src/specs.xml b/erts/doc/src/specs.xml
index ed6be650e5..0b943e6295 100644
--- a/erts/doc/src/specs.xml
+++ b/erts/doc/src/specs.xml
@@ -4,5 +4,8 @@
<xi:include href="../specs/specs_erlang.xml"/>
<xi:include href="../specs/specs_erl_tracer.xml"/>
<xi:include href="../specs/specs_init.xml"/>
+ <xi:include href="../specs/specs_persistent_term.xml"/>
<xi:include href="../specs/specs_zlib.xml"/>
+ <xi:include href="../specs/specs_atomics.xml"/>
+ <xi:include href="../specs/specs_counters.xml"/>
</specs>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 054692819e..57a9d45887 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -633,21 +633,24 @@ GENERATE += $(TTF_DIR)/driver_tab.c
# This list must be consistent with PRE_LOADED_MODULES in
# erts/preloaded/src/Makefile.
-PRELOAD_BEAM = $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erts_code_purger.beam \
- $(ERL_TOP)/erts/preloaded/ebin/init.beam \
- $(ERL_TOP)/erts/preloaded/ebin/prim_buffer.beam \
- $(ERL_TOP)/erts/preloaded/ebin/prim_eval.beam \
- $(ERL_TOP)/erts/preloaded/ebin/prim_inet.beam \
- $(ERL_TOP)/erts/preloaded/ebin/prim_file.beam \
- $(ERL_TOP)/erts/preloaded/ebin/zlib.beam \
- $(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erts_literal_area_collector.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam
+PRELOAD_BEAM = $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_code_purger.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/init.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/prim_buffer.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/prim_eval.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/prim_inet.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/prim_file.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/zlib.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_literal_area_collector.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/atomics.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/counters.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/persistent_term.beam
ifeq ($(TARGET),win32)
# On windows the preloaded objects are in a resource object.
@@ -839,6 +842,8 @@ RUN_OBJS += \
$(OBJDIR)/erl_bif_ddll.o $(OBJDIR)/erl_bif_guard.o \
$(OBJDIR)/erl_bif_info.o $(OBJDIR)/erl_bif_op.o \
$(OBJDIR)/erl_bif_os.o $(OBJDIR)/erl_bif_lists.o \
+ $(OBJDIR)/erl_bif_persistent.o \
+ $(OBJDIR)/erl_bif_atomics.o $(OBJDIR)/erl_bif_counters.o \
$(OBJDIR)/erl_bif_trace.o $(OBJDIR)/erl_bif_unique.o \
$(OBJDIR)/erl_bif_wrap.o $(OBJDIR)/erl_nfunc_sched.o \
$(OBJDIR)/erl_guard_bifs.o $(OBJDIR)/erl_dirty_bif_wrap.o \
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 5381611fab..59b51fd15e 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -174,7 +174,7 @@ atom_alloc(Atom* tmpl)
/*
* Precompute ordinal value of first 3 bytes + 7 bits.
- * This is used by utils.c:erts_cmp_atoms().
+ * This is used by erl_utils.h:erts_cmp_atoms().
* We cannot use the full 32 bits of the first 4 bytes,
* since we use the sign of the difference between two
* ordinal values to represent their relative order.
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 45b7540aeb..a14f22b19e 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -182,6 +182,7 @@ atom control
atom copy
atom copy_literals
atom counters
+atom count
atom cpu
atom cpu_timestamp
atom cr
@@ -287,6 +288,7 @@ atom gc_minor_end
atom gc_minor_start
atom Ge='>='
atom generational
+atom get_all_trap
atom get_seq_token
atom get_tcw
atom gather_gc_info_result
@@ -325,6 +327,7 @@ atom index
atom infinity
atom info
atom info_msg
+atom info_trap
atom init
atom initial_call
atom input
@@ -393,6 +396,7 @@ atom microsecond
atom microstate_accounting
atom milli_seconds
atom millisecond
+atom min
atom min_heap_size
atom min_bin_vheap_size
atom minor
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index d221e6aea6..bb1b2e5b27 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1752,29 +1752,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
finalize_purge_operation(BIF_P, ret == am_true);
if (literals) {
- ErtsLiteralAreaRef *ref;
- ErtsMessage *mp;
- ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
- sizeof(ErtsLiteralAreaRef));
- ref->literal_area = literals;
- ref->next = NULL;
- erts_mtx_lock(&release_literal_areas.mtx);
- if (release_literal_areas.last) {
- release_literal_areas.last->next = ref;
- release_literal_areas.last = ref;
- }
- else {
- release_literal_areas.first = ref;
- release_literal_areas.last = ref;
- }
- erts_mtx_unlock(&release_literal_areas.mtx);
- mp = erts_alloc_message(0, NULL);
- ERL_MESSAGE_TOKEN(mp) = am_undefined;
- erts_queue_proc_message(BIF_P,
- erts_literal_area_collector,
- 0,
- mp,
- am_copy_literals);
+ erts_queue_release_literals(BIF_P, literals);
}
return ret;
@@ -1786,6 +1764,41 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
}
}
+void
+erts_queue_release_literals(Process* c_p, ErtsLiteralArea* literals)
+{
+ ErtsLiteralAreaRef *ref;
+ ErtsMessage *mp;
+ ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
+ sizeof(ErtsLiteralAreaRef));
+ ref->literal_area = literals;
+ ref->next = NULL;
+ erts_mtx_lock(&release_literal_areas.mtx);
+ if (release_literal_areas.last) {
+ release_literal_areas.last->next = ref;
+ release_literal_areas.last = ref;
+ } else {
+ release_literal_areas.first = ref;
+ release_literal_areas.last = ref;
+ }
+ erts_mtx_unlock(&release_literal_areas.mtx);
+ mp = erts_alloc_message(0, NULL);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ if (c_p == NULL) {
+ erts_queue_message(erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals,
+ am_system);
+ } else {
+ erts_queue_proc_message(c_p,
+ erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals);
+ }
+}
+
/*
* Move code from current to old and null all export entries for the module
*/
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index f18af8bcd7..015c051cc1 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3622,6 +3622,10 @@ erts_internal_garbage_collect_1(BIF_ALIST_1)
default: BIF_ERROR(BIF_P, BADARG);
}
erts_garbage_collect(BIF_P, 0, NULL, 0);
+ if (ERTS_PROC_IS_EXITING(BIF_P)) {
+ /* The max heap size limit was reached. */
+ return THE_NON_VALUE;
+ }
return am_true;
}
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 7548924178..aa3c3acd9f 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -40,6 +40,7 @@
# Note: Guards BIFs usually require special support in the compiler.
#
+
gcbif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
@@ -698,3 +699,28 @@ ubif erlang:map_get/2
ubif erlang:is_map_key/2
bif ets:internal_delete_all/2
bif ets:internal_select_delete/2
+
+#
+# New in 21.2
+#
+
+bif persistent_term:put/2
+bif persistent_term:get/1
+bif persistent_term:get/0
+bif persistent_term:erase/1
+bif persistent_term:info/0
+bif erts_internal:erase_persistent_terms/0
+
+bif erts_internal:atomics_new/2
+bif atomics:get/2
+bif atomics:put/3
+bif atomics:add/3
+bif atomics:add_get/3
+bif atomics:exchange/3
+bif atomics:compare_exchange/4
+bif atomics:info/1
+
+bif erts_internal:counters_new/1
+bif erts_internal:counters_get/2
+bif erts_internal:counters_add/3
+bif erts_internal:counters_info/1
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index e7bfd04b73..e7bd046e18 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1074,6 +1074,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
Eterm* ptr;
Eterm *lit_purge_ptr = info->lit_purge_ptr;
Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
@@ -1119,7 +1120,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (in_literal_purge_area(ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1170,7 +1171,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (in_literal_purge_area(ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1338,6 +1339,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
unsigned remaining;
Eterm *lit_purge_ptr = info->lit_purge_ptr;
Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
Eterm saved_obj = obj;
@@ -1387,7 +1389,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = list_val(obj);
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!in_literal_purge_area(ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
@@ -1455,7 +1457,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = boxed_val(obj);
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!in_literal_purge_area(ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 5409b89bab..4f03a34390 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -274,9 +274,13 @@ type ML_YIELD_STATE SHORT_LIVED SYSTEM monitor_link_yield_state
type ML_DIST STANDARD SYSTEM monitor_link_dist
type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
+type LIST_TRAP SHORT_LIVED PROCESSES list_bif_trap_state
type ENVIRONMENT SYSTEM SYSTEM environment
+type PERSISTENT_TERM LONG_LIVED CODE persisten_term
+type PERSISTENT_LOCK_Q SHORT_LIVED SYSTEM persistent_lock_q
+
#
# Types used for special emulators
#
@@ -334,6 +338,8 @@ type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
type MSACC DRIVER SYSTEM microstate_accounting
type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request
+type ATOMICS STANDARD SYSTEM erl_bif_atomics
+type COUNTERS STANDARD SYSTEM erl_bif_counters
#
# Types used by system specific code
diff --git a/erts/emulator/beam/erl_bif_atomics.c b/erts/emulator/beam/erl_bif_atomics.c
new file mode 100644
index 0000000000..092dbb3bd3
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_atomics.c
@@ -0,0 +1,256 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: High performance atomics.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+typedef struct
+{
+ int is_signed;
+ UWord vlen;
+ erts_atomic64_t v[1];
+}AtomicsRef;
+
+static int atomics_destructor(Binary *unused)
+{
+ return 1;
+}
+
+#define OPT_SIGNED (1 << 0)
+
+BIF_RETTYPE erts_internal_atomics_new_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ Binary* mbin;
+ UWord i, cnt, opts;
+ Uint bytes;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0
+ || !term_to_UWord(BIF_ARG_2, &opts)) {
+
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / sizeof(p->v[0])))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ bytes = offsetof(AtomicsRef, v) + cnt*sizeof(p->v[0]);
+ mbin = erts_create_magic_binary_x(bytes,
+ atomics_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->is_signed = opts & OPT_SIGNED;
+ p->vlen = cnt;
+ for (i=0; i < cnt; i++)
+ erts_atomic64_init_nob(&p->v[i], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, AtomicsRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != atomics_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_ix(Eterm ref, Eterm ix,
+ AtomicsRef** pp, UWord* ixp)
+{
+ return (get_ref(ref, pp)
+ && term_to_UWord(ix, ixp)
+ && --(*ixp) < (*pp)->vlen);
+}
+
+static ERTS_INLINE int get_value(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (p->is_signed ?
+ term_to_Sint64(term, (Sint64*)valp) :
+ term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE int get_incr(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_atomic(Process* proc, AtomicsRef* p,
+ erts_aint64_t val)
+{
+ if (p->is_signed) {
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+ }
+ else {
+ if ((Uint64)val <= MAX_SMALL)
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_UINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_uint64_to_big(val, &hp);
+ }
+ }
+}
+
+BIF_RETTYPE atomics_put_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t val;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &val)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_set_mb(&p->v[ix], val);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_get_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ UWord ix;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_read_mb(&p->v[ix]));
+}
+
+BIF_RETTYPE atomics_add_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_add_mb(&p->v[ix], incr);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_add_get_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_add_read_mb(&p->v[ix], incr));
+}
+
+BIF_RETTYPE atomics_exchange_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_xchg_mb(&p->v[ix], desired);
+ return bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_compare_exchange_4(BIF_ALIST_4)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t expected, desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &expected)
+ || !get_value(p, BIF_ARG_4, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_cmpxchg_mb(&p->v[ix], desired, expected);
+ return was == expected ? am_ok : bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_info_1(BIF_ALIST_1)
+{
+ AtomicsRef* p;
+ Uint hsz = MAP4_SZ;
+ Eterm *hp;
+ Uint64 max;
+ Sint64 min;
+ UWord memory;
+ Eterm max_val, min_val, sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ max = p->is_signed ? ERTS_SINT64_MAX : ERTS_UINT64_MAX;
+ min = p->is_signed ? ERTS_SINT64_MIN : 0;
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+
+ erts_bld_uint64(NULL, &hsz, max);
+ erts_bld_sint64(NULL, &hsz, min);
+ erts_bld_uword(NULL, &hsz, p->vlen);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ max_val = erts_bld_uint64(&hp, NULL, max);
+ min_val = erts_bld_sint64(&hp, NULL, min);
+ sz_val = erts_bld_uword(&hp, NULL, p->vlen);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP4(hp, am_max, max_val,
+ am_memory, mem_val,
+ am_min, min_val,
+ am_size, sz_val);
+}
diff --git a/erts/emulator/beam/erl_bif_counters.c b/erts/emulator/beam/erl_bif_counters.c
new file mode 100644
index 0000000000..a46b462225
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_counters.c
@@ -0,0 +1,219 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: High performance counters.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+
+#define COUNTERS_PER_CACHE_LINE (ERTS_CACHE_LINE_SIZE / sizeof(erts_atomic64_t))
+
+typedef struct
+{
+ UWord arity;
+#ifdef DEBUG
+ UWord ulen;
+#endif
+ union {
+ erts_atomic64_t v[COUNTERS_PER_CACHE_LINE];
+ byte cache_line__[ERTS_CACHE_LINE_SIZE];
+ } u[1];
+}CountersRef;
+
+static int counters_destructor(Binary *unused)
+{
+ return 1;
+}
+
+
+static UWord ERTS_INLINE div_ceil(UWord dividend, UWord divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
+BIF_RETTYPE erts_internal_counters_new_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Binary* mbin;
+ UWord ui, vi, cnt;
+ Uint bytes, cache_lines;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / (sizeof(erts_atomic64_t)*2*erts_no_schedulers)))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ cache_lines = erts_no_schedulers * div_ceil(cnt, COUNTERS_PER_CACHE_LINE);
+ bytes = offsetof(CountersRef, u) + cache_lines * ERTS_CACHE_LINE_SIZE;
+ mbin = erts_create_magic_binary_x(bytes,
+ counters_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->arity = cnt;
+#ifdef DEBUG
+ p->ulen = cache_lines;
+#endif
+ ASSERT((byte*)&p->u[cache_lines] <= ((byte*)p + bytes));
+ for (ui=0; ui < cache_lines; ui++)
+ for (vi=0; vi < COUNTERS_PER_CACHE_LINE; vi++)
+ erts_atomic64_init_nob(&p->u[ui].v[vi], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, CountersRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != counters_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app,
+ UWord sched_ix)
+{
+ CountersRef* p;
+ UWord ix, ui, vi;
+ if (!get_ref(ref, &p) || !term_to_UWord(index, &ix) || --ix >= p->arity)
+ return 0;
+ ui = (ix / COUNTERS_PER_CACHE_LINE) * erts_no_schedulers + sched_ix;
+ vi = ix % COUNTERS_PER_CACHE_LINE;
+ ASSERT(ui < p->ulen);
+ *pp = p;
+ *app = &p->u[ui].v[vi];
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_my_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+ return get_ref_cnt(ref, index, pp, app, esdp->no - 1);
+}
+
+static ERTS_INLINE int get_ref_first_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ return get_ref_cnt(ref, index, pp, app, 0);
+}
+
+static ERTS_INLINE int get_incr(CountersRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_counter(Process* proc, CountersRef* p,
+ erts_aint64_t val)
+{
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+}
+
+BIF_RETTYPE erts_internal_counters_get_2(BIF_ALIST_2)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t acc = 0;
+ int j;
+
+ if (!get_ref_first_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ for (j = erts_no_schedulers; j ; --j) {
+ acc += erts_atomic64_read_nob(ap);
+ ap = (erts_atomic64_t*) ((byte*)ap + ERTS_CACHE_LINE_SIZE);
+ }
+ return bld_counter(BIF_P, p, acc);
+}
+
+BIF_RETTYPE erts_internal_counters_add_3(BIF_ALIST_3)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t incr, sum;
+
+ if (!get_ref_my_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ sum = incr + erts_atomic64_read_nob(ap);
+ erts_atomic64_set_nob(ap, sum);
+ return am_ok;
+}
+
+
+BIF_RETTYPE erts_internal_counters_info_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Uint hsz = MAP2_SZ;
+ Eterm *hp;
+ UWord memory;
+ Eterm sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+ erts_bld_uword(NULL, &hsz, p->arity);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ sz_val = erts_bld_uword(&hp, NULL, p->arity);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP2(hp, am_memory, mem_val,
+ am_size, sz_val);
+}
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 395be67a90..a793b34852 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -29,12 +29,13 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
-#include "erl_process.h"
-#include "error.h"
#include "bif.h"
+#include "erl_binary.h"
+
static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
+
static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
{
Eterm list;
@@ -146,103 +147,724 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
-/*
- * erlang:'--'/2
- */
+/* erlang:'--'/2
+ *
+ * Subtracts a list from another (LHS -- RHS), removing the first occurrence of
+ * each element in RHS from LHS. There is no type coercion, so the elements must
+ * match exactly.
+ *
+ * The BIF is broken into several stages that can all trap individually, and it
+ * chooses its algorithm based on input size. If either input is small it will
+ * use a linear scan tuned to which side it's on, and if both inputs are large
+ * enough it will convert RHS into a multiset to provide good asymptotic
+ * behavior. */
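Before the staged implementation, it may help to see the semantics in isolation. A minimal standalone sketch, not part of the patch, using int arrays in place of Eterm lists:

    #include <stdio.h>
    #include <stddef.h>

    /* Remove from lhs the first occurrence of each element of rhs, preserving
     * the order of the surviving elements. Returns the new length of lhs. */
    static size_t subtract_ints(int *lhs, size_t n, const int *rhs, size_t m)
    {
        size_t i, j, k;

        for (j = 0; j < m; j++) {
            for (i = 0; i < n; i++) {
                if (lhs[i] == rhs[j]) {
                    for (k = i + 1; k < n; k++)
                        lhs[k - 1] = lhs[k];   /* shift the tail down one step */
                    n--;
                    break;                     /* only the first match is removed */
                }
            }
        }
        return n;
    }

    int main(void)
    {
        int lhs[] = {1, 2, 3, 2, 1};
        const int rhs[] = {2, 1, 4};
        size_t n = subtract_ints(lhs, 5, rhs, 3);
        size_t i;

        for (i = 0; i < n; i++)
            printf("%d ", lhs[i]);   /* prints: 3 2 1 */
        printf("\n");
        return 0;
    }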
+
+#define SUBTRACT_LHS_THRESHOLD 16
+#define SUBTRACT_RHS_THRESHOLD 16
+
+typedef enum {
+ SUBTRACT_STAGE_START,
+ SUBTRACT_STAGE_LEN_LHS,
+
+ /* Naive linear scan that's efficient when
+ * LEN_LHS <= SUBTRACT_LHS_THRESHOLD. */
+ SUBTRACT_STAGE_NAIVE_LHS,
+
+ SUBTRACT_STAGE_LEN_RHS,
+
+ /* As SUBTRACT_STAGE_NAIVE_LHS but for RHS. */
+ SUBTRACT_STAGE_NAIVE_RHS,
+
+ /* Creates a multiset from RHS for faster lookups before sweeping through
+ * LHS. The set is implemented as a red-black tree and duplicate elements
+ * are handled by a counter on each node. */
+ SUBTRACT_STAGE_SET_BUILD,
+ SUBTRACT_STAGE_SET_FINISH
+} ErtsSubtractCtxStage;
+
+typedef struct subtract_node__ {
+ struct subtract_node__ *parent;
+ struct subtract_node__ *left;
+ struct subtract_node__ *right;
+ int is_red;
+
+ Eterm key;
+ Uint count;
+} subtract_tree_t;
+
+typedef struct {
+ ErtsSubtractCtxStage stage;
+
+ Eterm lhs_original;
+ Eterm rhs_original;
+
+ Uint lhs_remaining;
+ Uint rhs_remaining;
+
+ Eterm iterator;
+
+ Eterm *result_cdr;
+ Eterm result;
+
+ union {
+ Eterm lhs_elements[SUBTRACT_LHS_THRESHOLD];
+ Eterm rhs_elements[SUBTRACT_RHS_THRESHOLD];
+
+ struct {
+ subtract_tree_t *tree;
+
+ /* A memory area for the tree's nodes, so that we don't need a
+ * separate allocation per node. */
+ subtract_tree_t *alloc_start;
+ subtract_tree_t *alloc;
+ } rhs_set;
+ } u;
+} ErtsSubtractContext;
+
+#define ERTS_RBT_PREFIX subtract
+#define ERTS_RBT_T subtract_tree_t
+#define ERTS_RBT_KEY_T Eterm
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->parent = NULL; \
+ (T)->left = NULL; \
+ (T)->right = NULL; \
+ } while(0)
+#define ERTS_RBT_IS_RED(T) ((T)->is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
+#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
+#define ERTS_RBT_GET_PARENT(T) ((T)->parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->key)
+#define ERTS_RBT_CMP_KEYS(KX, KY) CMP_TERM(KX, KY)
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context);
+
+static void subtract_ctx_dtor(ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_SET_BUILD:
+ case SUBTRACT_STAGE_SET_FINISH:
+ erts_free(ERTS_ALC_T_LIST_TRAP, context->u.rhs_set.alloc_start);
+ break;
+ default:
+ break;
+ }
+}
-#define SMALL_VEC_SIZE 10
-static Eterm subtract(Process* p, Eterm A, Eterm B)
-{
- Eterm list;
- Eterm* hp;
- Uint need;
- Eterm res;
- Eterm small_vec[SMALL_VEC_SIZE]; /* Preallocated memory for small lists */
- Eterm* vec_p;
- Eterm* vp;
- Sint i;
- Sint n;
- Sint m;
-
- if ((n = erts_list_length(A)) < 0) {
- BIF_ERROR(p, BADARG);
+static int subtract_ctx_bin_dtor(Binary *context_bin) {
+ ErtsSubtractContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
+ subtract_ctx_dtor(context);
+ return 1;
+}
+
+static void subtract_ctx_move(ErtsSubtractContext *from,
+ ErtsSubtractContext *to) {
+ int uses_result_cdr = 0;
+
+ to->stage = from->stage;
+
+ to->lhs_original = from->lhs_original;
+ to->rhs_original = from->rhs_original;
+
+ to->lhs_remaining = from->lhs_remaining;
+ to->rhs_remaining = from->rhs_remaining;
+
+ to->iterator = from->iterator;
+ to->result = from->result;
+
+ switch (to->stage) {
+ case SUBTRACT_STAGE_NAIVE_LHS:
+ sys_memcpy(to->u.lhs_elements,
+ from->u.lhs_elements,
+ sizeof(Eterm) * to->lhs_remaining);
+ break;
+ case SUBTRACT_STAGE_NAIVE_RHS:
+ sys_memcpy(to->u.rhs_elements,
+ from->u.rhs_elements,
+ sizeof(Eterm) * to->rhs_remaining);
+
+ uses_result_cdr = 1;
+ break;
+ case SUBTRACT_STAGE_SET_FINISH:
+ uses_result_cdr = 1;
+ /* FALL THROUGH */
+ case SUBTRACT_STAGE_SET_BUILD:
+ to->u.rhs_set.alloc_start = from->u.rhs_set.alloc_start;
+ to->u.rhs_set.alloc = from->u.rhs_set.alloc;
+ to->u.rhs_set.tree = from->u.rhs_set.tree;
+ break;
+ default:
+ break;
}
- if ((m = erts_list_length(B)) < 0) {
- BIF_ERROR(p, BADARG);
+
+ if (uses_result_cdr) {
+ if (from->result_cdr == &from->result) {
+ to->result_cdr = &to->result;
+ } else {
+ to->result_cdr = from->result_cdr;
+ }
}
-
- if (n == 0)
- BIF_RET(NIL);
- if (m == 0)
- BIF_RET(A);
-
- /* allocate element vector */
- if (n <= SMALL_VEC_SIZE)
- vec_p = small_vec;
- else
- vec_p = (Eterm*) erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
-
- /* PUT ALL ELEMENTS IN VP */
- vp = vec_p;
- list = A;
- i = n;
- while(i--) {
- Eterm* listp = list_val(list);
- *vp++ = CAR(listp);
- list = CDR(listp);
+}
+
+static Eterm subtract_create_trap_state(Process *p,
+ ErtsSubtractContext *context) {
+ Binary *state_bin;
+ Eterm *hp;
+
+ state_bin = erts_create_magic_binary(sizeof(ErtsSubtractContext),
+ subtract_ctx_bin_dtor);
+
+ subtract_ctx_move(context, ERTS_MAGIC_BIN_DATA(state_bin));
+
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+
+ return erts_mk_magic_ref(&hp, &MSO(p), state_bin);
+}
+
+static int subtract_enter_len_lhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_LHS;
+
+ context->iterator = context->lhs_original;
+ context->lhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_enter_len_rhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_RHS;
+
+ context->iterator = context->rhs_original;
+ context->rhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_get_length(Process *p, Eterm *iterator_p, Uint *count_p) {
+ static const Sint ELEMENTS_PER_RED = 32;
+
+ Sint budget, count;
+ Eterm iterator;
+
+ budget = ELEMENTS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ iterator = *iterator_p;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ for (count = 0; count < budget && is_list(iterator); count++) {
+ iterator = CDR(list_val(iterator));
}
-
- /* UNMARK ALL DELETED CELLS */
- list = B;
- m = 0; /* number of deleted elements */
- while(is_list(list)) {
- Eterm* listp = list_val(list);
- Eterm elem = CAR(listp);
- i = n;
- vp = vec_p;
- while(i--) {
- if (is_value(*vp) && eq(*vp, elem)) {
- *vp = THE_NON_VALUE;
- m++;
- break;
- }
- vp++;
- }
- list = CDR(listp);
+
+ if (!is_list(iterator) && !is_nil(iterator)) {
+ return -1;
}
-
- if (m == n) /* All deleted ? */
- res = NIL;
- else if (m == 0) /* None deleted ? */
- res = A;
- else { /* REBUILD LIST */
- res = NIL;
- need = 2*(n - m);
- hp = HAlloc(p, need);
- vp = vec_p + n - 1;
- while(vp >= vec_p) {
- if (is_value(*vp)) {
- res = CONS(hp, *vp, res);
- hp += 2;
- }
- vp--;
- }
+
+ BUMP_REDS(p, count / ELEMENTS_PER_RED);
+
+ *iterator_p = iterator;
+ *count_p += count;
+
+ if (is_nil(iterator)) {
+ return 1;
}
- if (vec_p != small_vec)
- erts_free(ERTS_ALC_T_TMP, (void *) vec_p);
- BIF_RET(res);
+
+ return 0;
}
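subtract_get_length is the simplest instance of the yielding pattern used by every stage here: derive a work budget from the reductions the process has left, do at most that much work, report progress with BUMP_REDS, and tell the caller whether it needs to trap. A standalone sketch of that pattern, with the ERTS specifics replaced by plain C and with illustrative names only:

    #include <stdio.h>
    #include <stddef.h>

    struct walk_state {
        const int *data;
        size_t pos, len;
        long sum;
    };

    /* Do at most units_per_red * reds_left units of work; return 1 when done,
     * 0 when the budget ran out and the caller should "trap" and call again. */
    static int walk_step(struct walk_state *st, long units_per_red, long reds_left)
    {
        long budget = units_per_red * reds_left;
        long done = 0;

        while (done < budget && st->pos < st->len) {
            st->sum += st->data[st->pos];   /* one unit of work */
            st->pos++;
            done++;
        }
        /* A real BIF would now BUMP_REDS(p, done / units_per_red) and, if not
         * finished, stash the state in a magic binary and trap. */
        return st->pos == st->len;
    }

    int main(void)
    {
        int data[] = {1, 2, 3, 4, 5};
        struct walk_state st = {data, 0, 5, 0};
        int rounds = 0;

        while (!walk_step(&st, 2, 1))   /* artificially tiny budget per call */
            rounds++;
        printf("sum = %ld after %d extra rounds\n", st.sum, rounds);   /* 15, 2 */
        return 0;
    }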
-BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_enter_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_LHS;
+
+ context->iterator = context->rhs_original;
+ context->result = NIL;
+
+ iterator = context->lhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_LHS_THRESHOLD);
+
+ context->u.lhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->lhs_remaining);
+
+ return subtract_continue(p, context);
}
-BIF_RETTYPE subtract_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = 0; found_at < context->lhs_remaining; found_at++) {
+ if (EQ(value, context->u.lhs_elements[found_at])) {
+ /* Shift the remaining elements one step down so that their
+ * order is preserved.
+ *
+ * Note that we can't exit early as that would suppress errors
+ * in the right-hand side (this runs prior to determining the
+ * length of RHS). */
+
+ context->lhs_remaining--;
+ sys_memmove(&context->u.lhs_elements[found_at],
+ &context->u.lhs_elements[found_at + 1],
+ (context->lhs_remaining - found_at) * sizeof(Eterm));
+ break;
+ }
+ }
+
+ checks += MAX(1, context->lhs_remaining);
+ context->iterator = next;
+ }
+
+ BUMP_REDS(p, MIN(checks, budget) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ return 0;
+ } else if (!is_nil(context->iterator)) {
+ return -1;
+ }
+
+ if (context->lhs_remaining > 0) {
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(p, context->lhs_remaining * 2);
+
+ for (i = context->lhs_remaining - 1; i >= 0; i--) {
+ Eterm value = context->u.lhs_elements[i];
+
+ context->result = CONS(hp, value, context->result);
+ hp += 2;
+ }
+ }
+
+ ASSERT(context->lhs_remaining > 0 || context->result == NIL);
+
+ return 1;
+}
+
+static int subtract_enter_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_RHS;
+
+ context->iterator = context->lhs_original;
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ iterator = context->rhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_RHS_THRESHOLD);
+
+ context->u.rhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->rhs_remaining);
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = context->rhs_remaining - 1; found_at >= 0; found_at--) {
+ if (EQ(value, context->u.rhs_elements[found_at])) {
+ break;
+ }
+ }
+
+ if (found_at < 0) {
+ /* Destructively add the value to the result. This is safe
+ * since the GC is disabled and the unfinished term is never
+ * leaked to the outside world. */
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else if (found_at >= 0) {
+ Eterm swap;
+
+ if (context->rhs_remaining-- == 1) {
+ /* We've run out of items to remove, so the rest of the
+ * result will be equal to the remainder of the input. We know
+ * that LHS is well-formed as any errors would've been reported
+ * during length determination. */
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ swap = context->u.rhs_elements[context->rhs_remaining];
+ context->u.rhs_elements[found_at] = swap;
+ }
+
+ checks += context->rhs_remaining;
+ context->iterator = next;
+ context->lhs_remaining--;
+ }
+
+ /* The result only has to be terminated when returning it to the user, but
+ * we're doing it when trapping as well to prevent headaches when
+ * debugging. */
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
}
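The result_cdr field used above is a classic pointer-to-tail technique: the list is produced in input order by always keeping a pointer to the field that should receive the next cell, and terminating it once at the end, so no reversal pass is needed. A standalone sketch with an ordinary heap-allocated linked list (names are illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    struct cell { int value; struct cell *next; };

    /* Build a list of the even inputs, in input order, by keeping a pointer to
     * the field that should receive the next cell (the role of result_cdr). */
    static struct cell *copy_evens(const int *in, size_t len)
    {
        struct cell *head = NULL;
        struct cell **tailp = &head;
        size_t i;

        for (i = 0; i < len; i++) {
            if (in[i] % 2 == 0) {
                struct cell *c = malloc(sizeof(*c));
                c->value = in[i];
                *tailp = c;          /* link the new cell in place */
                tailp = &c->next;    /* the "hole" is now this cell's next field */
            }
        }
        *tailp = NULL;               /* terminate once, as the BIF does on return */
        return head;
    }

    int main(void)
    {
        int in[] = {1, 2, 3, 4, 6};
        struct cell *c;

        for (c = copy_evens(in, 5); c != NULL; c = c->next)
            printf("%d ", c->value);   /* prints: 2 4 6 */
        printf("\n");
        return 0;                      /* cells are leaked; fine for a sketch */
    }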
+static int subtract_enter_set_build(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_BUILD;
+
+ context->u.rhs_set.alloc_start =
+ erts_alloc(ERTS_ALC_T_LIST_TRAP,
+ context->rhs_remaining * sizeof(subtract_tree_t));
+
+ context->u.rhs_set.alloc = context->u.rhs_set.alloc_start;
+ context->u.rhs_set.tree = NULL;
+
+ context->iterator = context->rhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_build(Process *p, ErtsSubtractContext *context) {
+ static const Sint INSERTIONS_PER_RED = 16;
+ Sint budget, insertions;
+
+ budget = INSERTIONS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ insertions = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (insertions < budget && is_list(context->iterator)) {
+ subtract_tree_t *existing_node, *new_node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ new_node = context->u.rhs_set.alloc;
+ new_node->key = value;
+ new_node->count = 1;
+
+ existing_node = subtract_rbt_lookup_insert(&context->u.rhs_set.tree,
+ new_node);
+
+ if (existing_node != NULL) {
+ existing_node->count++;
+ } else {
+ context->u.rhs_set.alloc++;
+ }
+
+ context->iterator = next;
+ insertions++;
+ }
+
+ BUMP_REDS(p, insertions / INSERTIONS_PER_RED);
+
+ ASSERT(is_list(context->iterator) || is_nil(context->iterator));
+ ASSERT(context->u.rhs_set.tree != NULL);
+
+ return is_nil(context->iterator);
+}
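The multiset built here stores each distinct RHS element once and counts duplicates on the node, which keeps the tree no larger than the number of distinct RHS elements. A standalone sketch of the same idea using an unbalanced binary search tree over ints (the patch itself uses the balanced tree from erl_rbtree.h keyed on term order):

    #include <stdio.h>
    #include <stdlib.h>

    struct mnode { int key; unsigned count; struct mnode *left, *right; };

    /* Insert key into the multiset: new keys get a node, duplicates only bump
     * the count on the existing node. */
    static struct mnode *multiset_insert(struct mnode *root, int key)
    {
        if (root == NULL) {
            struct mnode *n = calloc(1, sizeof(*n));
            n->key = key;
            n->count = 1;
            return n;
        }
        if (key < root->key)
            root->left = multiset_insert(root->left, key);
        else if (key > root->key)
            root->right = multiset_insert(root->right, key);
        else
            root->count++;
        return root;
    }

    int main(void)
    {
        struct mnode *set = NULL;
        int rhs[] = {3, 1, 3, 3, 2};
        size_t i;

        for (i = 0; i < 5; i++)
            set = multiset_insert(set, rhs[i]);
        printf("count(3) = %u\n", set->count);   /* 3 is the root; prints 3 */
        return 0;
    }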
+
+static int subtract_enter_set_finish(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_FINISH;
+
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ context->iterator = context->lhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_finish(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 8;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ subtract_tree_t *node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ ASSERT(context->rhs_remaining > 0);
+
+ node = subtract_rbt_lookup(context->u.rhs_set.tree, value);
+
+ if (node == NULL) {
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else {
+ if (context->rhs_remaining-- == 1) {
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ if (node->count-- == 1) {
+ subtract_rbt_delete(&context->u.rhs_set.tree, node);
+ }
+ }
+
+ context->iterator = next;
+ context->lhs_remaining--;
+ checks++;
+ }
+
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_START: {
+ return subtract_enter_len_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_LHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->lhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ if (context->lhs_remaining <= SUBTRACT_LHS_THRESHOLD) {
+ return subtract_enter_naive_lhs(p, context);
+ }
+
+ return subtract_enter_len_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_LHS: {
+ return subtract_naive_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_RHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->rhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ /* We've walked through both lists fully now so we no longer need
+ * to check for errors past this point. */
+
+ if (context->rhs_remaining <= SUBTRACT_RHS_THRESHOLD) {
+ return subtract_enter_naive_rhs(p, context);
+ }
+
+ return subtract_enter_set_build(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_RHS: {
+ return subtract_naive_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_BUILD: {
+ int res = subtract_set_build(p, context);
+
+ if (res != 1) {
+ return res;
+ }
+
+ return subtract_enter_set_finish(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_FINISH: {
+ return subtract_set_finish(p, context);
+ }
+
+ default:
+ ERTS_ASSERT(!"unreachable");
+ }
+}
+
+static int subtract_start(Process *p, Eterm lhs, Eterm rhs,
+ ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_START;
+
+ context->lhs_original = lhs;
+ context->rhs_original = rhs;
+
+ return subtract_continue(p, context);
+}
+
+/* erlang:'--'/2 */
+static Eterm subtract(Export *bif_entry, BIF_ALIST_2) {
+ Eterm lhs = BIF_ARG_1, rhs = BIF_ARG_2;
+
+ if ((is_list(lhs) || is_nil(lhs)) && (is_list(rhs) || is_nil(rhs))) {
+ /* We start with the context on the stack in the hopes that we won't
+ * have to trap. */
+ ErtsSubtractContext context;
+ int res;
+
+ res = subtract_start(BIF_P, lhs, rhs, &context);
+
+ if (res == 0) {
+ Eterm state_mref;
+
+ state_mref = subtract_create_trap_state(BIF_P, &context);
+ erts_set_gc_state(BIF_P, 0);
+
+ BIF_TRAP2(bif_entry, BIF_P, state_mref, NIL);
+ }
+
+ subtract_ctx_dtor(&context);
+
+ if (res < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ BIF_RET(context.result);
+ } else if (is_internal_magic_ref(lhs)) {
+ ErtsSubtractContext *context;
+ int (*dtor)(Binary*);
+ Binary *magic_bin;
+
+ int res;
+
+ magic_bin = erts_magic_ref2bin(lhs);
+ dtor = ERTS_MAGIC_BIN_DESTRUCTOR(magic_bin);
+
+ if (dtor != subtract_ctx_bin_dtor) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ ASSERT(rhs == NIL);
+
+ context = ERTS_MAGIC_BIN_DATA(magic_bin);
+ res = subtract_continue(BIF_P, context);
+
+ if (res == 0) {
+ BIF_TRAP2(bif_entry, BIF_P, lhs, NIL);
+ }
+
+ erts_set_gc_state(BIF_P, 1);
+
+ if (res < 0) {
+ ERTS_BIF_ERROR_TRAPPED2(BIF_P, BADARG, bif_entry,
+ context->lhs_original,
+ context->rhs_original);
+ }
+
+ BIF_RET(context->result);
+ }
+
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_ebif_minusminus_2], BIF_CALL_ARGS);
+}
+
+BIF_RETTYPE subtract_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_subtract_2], BIF_CALL_ARGS);
+}
+
BIF_RETTYPE lists_member_2(BIF_ALIST_2)
{
Eterm term;
@@ -283,7 +905,7 @@ static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
{
static const Uint CELLS_PER_RED = 40;
- Eterm *heap_top, *heap_end;
+ Eterm *alloc_top, *alloc_end;
Uint cells_left, max_cells;
Eterm list, tail;
Eterm lookahead;
@@ -305,18 +927,18 @@ static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
BIF_ERROR(c_p, BADARG);
}
- heap_top = HAlloc(c_p, 2 * (max_cells - cells_left));
- heap_end = heap_top + 2 * (max_cells - cells_left);
+ alloc_top = HAlloc(c_p, 2 * (max_cells - cells_left));
+ alloc_end = alloc_top + 2 * (max_cells - cells_left);
- while (heap_top < heap_end) {
+ while (alloc_top < alloc_end) {
Eterm *pair = list_val(list);
- tail = CONS(heap_top, CAR(pair), tail);
+ tail = CONS(alloc_top, CAR(pair), tail);
list = CDR(pair);
ASSERT(is_list(list) || is_nil(list));
- heap_top += 2;
+ alloc_top += 2;
}
if (is_nil(list)) {
@@ -333,7 +955,7 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
{
static const Uint CELLS_PER_RED = 60;
- Eterm *heap_top, *heap_end;
+ Eterm *alloc_start, *alloc_top, *alloc_end;
Uint cells_left, max_cells;
Eterm list, tail;
@@ -343,21 +965,27 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
- heap_end = HEAP_LIMIT(c_p) - 2;
- heap_top = HEAP_TOP(c_p);
+ alloc_start = HEAP_TOP(c_p);
+ alloc_end = HEAP_LIMIT(c_p) - 2;
+ alloc_top = alloc_start;
+
+ /* Don't process more cells than we have reductions for. */
+ alloc_end = MIN(alloc_top + (cells_left * 2), alloc_end);
- while (heap_top < heap_end && is_list(list)) {
+ while (alloc_top < alloc_end && is_list(list)) {
Eterm *pair = list_val(list);
- tail = CONS(heap_top, CAR(pair), tail);
+ tail = CONS(alloc_top, CAR(pair), tail);
list = CDR(pair);
- heap_top += 2;
+ alloc_top += 2;
}
- cells_left -= (heap_top - heap_end) / 2;
+ cells_left -= (alloc_top - alloc_start) / 2;
+ HEAP_TOP(c_p) = alloc_top;
+
+ ASSERT(cells_left >= 0 && cells_left <= max_cells);
BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);
- HEAP_TOP(c_p) = heap_top;
if (is_nil(list)) {
BIF_RET(tail);
diff --git a/erts/emulator/beam/erl_bif_persistent.c b/erts/emulator/beam/erl_bif_persistent.c
new file mode 100644
index 0000000000..9dca768a18
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_persistent.c
@@ -0,0 +1,983 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: Implement persistent term storage.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "erl_map.h"
+#include "erl_binary.h"
+
+/*
+ * The limit for the number of persistent terms before
+ * a warning is issued.
+ */
+
+#define WARNING_LIMIT 20000
+#define XSTR(s) STR(s)
+#define STR(s) #s
+
+/*
+ * Parameters for the hash table.
+ */
+#define INITIAL_SIZE 8
+#define LOAD_FACTOR ((Uint)50)
+#define MUST_GROW(t) (((Uint)100) * t->num_entries >= LOAD_FACTOR * t->allocated)
+#define MUST_SHRINK(t) (((Uint)200) * t->num_entries <= LOAD_FACTOR * t->allocated && \
+ t->allocated > INITIAL_SIZE)
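With LOAD_FACTOR at 50, MUST_GROW fires once the table is at least half full and MUST_SHRINK once it is at most a quarter full and larger than INITIAL_SIZE. A quick standalone check of that arithmetic for a 16-slot table, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        unsigned allocated = 16;
        unsigned n;

        for (n = 0; n <= allocated; n++) {
            int grow   = 100u * n >= 50u * allocated;                  /* MUST_GROW   */
            int shrink = 200u * n <= 50u * allocated && allocated > 8; /* MUST_SHRINK */

            if (grow || shrink)
                printf("%2u/%u entries -> %s\n", n, allocated,
                       grow ? "grow" : "shrink");
        }
        /* Prints "shrink" for 0..4 entries and "grow" for 8..16 entries:
         * grow at 50% load, shrink at or below 25% load. */
        return 0;
    }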
+
+typedef struct hash_table {
+ Uint allocated;
+ Uint num_entries;
+ Uint mask;
+ Uint first_to_delete;
+ Uint num_to_delete;
+ erts_atomic_t refc;
+ struct hash_table* delete_next;
+ ErtsThrPrgrLaterOp thr_prog_op;
+ Eterm term[1];
+} HashTable;
+
+typedef struct trap_data {
+ HashTable* table;
+ Uint idx;
+ Uint remaining;
+ Uint memory; /* Used by info/0 to count used memory */
+} TrapData;
+
+/*
+ * Declarations of local functions.
+ */
+
+static HashTable* create_initial_table(void);
+static Uint lookup(HashTable* hash_table, Eterm key);
+static HashTable* copy_table(HashTable* old_table, Uint new_size, int rehash);
+static HashTable* tmp_table_copy(HashTable* old_table);
+static int try_seize_update_permission(Process* c_p);
+static void release_update_permission(int release_updater);
+static void table_updater(void* table);
+static void table_deleter(void* hash_table);
+static void dec_table_refc(Process* c_p, HashTable* old_table);
+static void delete_table(Process* c_p, HashTable* table);
+static void mark_for_deletion(HashTable* hash_table, Uint entry_index);
+static ErtsLiteralArea* term_to_area(Eterm tuple);
+static void suspend_updater(Process* c_p);
+static Eterm do_get_all(Process* c_p, TrapData* trap_data, Eterm res);
+static Eterm do_info(Process* c_p, TrapData* trap_data);
+static void append_to_delete_queue(HashTable* table);
+static HashTable* next_to_delete(void);
+static Eterm alloc_trap_data(Process* c_p);
+static int cleanup_trap_data(Binary *bp);
+
+/*
+ * Traps
+ */
+
+static Export persistent_term_get_all_export;
+static BIF_RETTYPE persistent_term_get_all_trap(BIF_ALIST_2);
+static Export persistent_term_info_export;
+static BIF_RETTYPE persistent_term_info_trap(BIF_ALIST_1);
+
+/*
+ * Pointer to the current hash table.
+ */
+
+static erts_atomic_t the_hash_table;
+
+/*
+ * Queue of processes waiting to update the hash table.
+ */
+
+struct update_queue_item {
+ Process *p;
+ struct update_queue_item* next;
+};
+
+static erts_mtx_t update_table_permission_mtx;
+static struct update_queue_item* update_queue = NULL;
+static Process* updater_process = NULL;
+
+/* Protected by update_table_permission_mtx */
+static ErtsThrPrgrLaterOp thr_prog_op;
+static int issued_warning = 0;
+
+/*
+ * Queue of hash tables to be deleted.
+ */
+
+static erts_mtx_t delete_queue_mtx;
+static HashTable* delete_queue_head = NULL;
+static HashTable** delete_queue_tail = &delete_queue_head;
+
+/*
+ * The following variables are only used during crash dumping. They
+ * are initialized by erts_init_persistent_dumping().
+ */
+
+ErtsLiteralArea** erts_persistent_areas;
+Uint erts_num_persistent_areas;
+
+void erts_init_bif_persistent_term(void)
+{
+ HashTable* hash_table;
+
+ /*
+ * Initialize the mutex protecting updates.
+ */
+
+ erts_mtx_init(&update_table_permission_mtx,
+ "update_persistent_term_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Initialize delete queue.
+ */
+
+ erts_mtx_init(&delete_queue_mtx,
+ "persistent_term_delete_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Allocate a small initial hash table.
+ */
+
+ hash_table = create_initial_table();
+ erts_atomic_init_nob(&the_hash_table, (erts_aint_t)hash_table);
+
+ /*
+ * Initialize export entry for traps
+ */
+
+ erts_init_trap_export(&persistent_term_get_all_export,
+ am_persistent_term, am_get_all_trap, 2,
+ &persistent_term_get_all_trap);
+ erts_init_trap_export(&persistent_term_info_export,
+ am_persistent_term, am_info_trap, 1,
+ &persistent_term_info_trap);
+}
+
+BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
+{
+ Eterm key;
+ Eterm term;
+ Eterm heap[3];
+ Eterm tuple;
+ HashTable* hash_table;
+ Uint term_size;
+ Uint lit_area_size;
+ ErlOffHeap code_off_heap;
+ ErtsLiteralArea* literal_area;
+ erts_shcopy_t info;
+ Eterm* ptr;
+ Uint entry_index;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+
+ key = BIF_ARG_1;
+ term = BIF_ARG_2;
+
+ entry_index = lookup(hash_table, key);
+
+ heap[0] = make_arityval(2);
+ heap[1] = key;
+ heap[2] = term;
+ tuple = make_tuple(heap);
+
+ if (is_nil(hash_table->term[entry_index])) {
+ Uint size = hash_table->allocated;
+ if (MUST_GROW(hash_table)) {
+ size *= 2;
+ }
+ hash_table = copy_table(hash_table, size, 0);
+ entry_index = lookup(hash_table, key);
+ hash_table->num_entries++;
+ } else {
+ Eterm tuple = hash_table->term[entry_index];
+ Eterm old_term;
+
+ ASSERT(is_tuple_arity(tuple, 2));
+ old_term = boxed_val(tuple)[2];
+ if (EQ(term, old_term)) {
+ /* Same value. No need to update anything. */
+ release_update_permission(0);
+ BIF_RET(am_ok);
+ } else {
+ /* Mark the old term for deletion. */
+ mark_for_deletion(hash_table, entry_index);
+ hash_table = copy_table(hash_table, hash_table->allocated, 0);
+ }
+ }
+
+ /*
+ * Preserve internal sharing in the term by using the
+ * sharing-preserving functions. However, literals must
+ * be copied in case the module holding them is unloaded.
+ */
+ INITIALIZE_SHCOPY(info);
+ info.copy_literals = 1;
+ term_size = copy_shared_calculate(tuple, &info);
+ ERTS_INIT_OFF_HEAP(&code_off_heap);
+ lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
+ literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
+ ptr = &literal_area->start[0];
+ literal_area->end = ptr + term_size;
+ tuple = copy_shared_perform(tuple, term_size, &info, &ptr, &code_off_heap);
+ ASSERT(tuple_val(tuple) == literal_area->start);
+ literal_area->off_heap = code_off_heap.first;
+ DESTROY_SHCOPY(info);
+ erts_set_literal_tag(&tuple, literal_area->start, term_size);
+ hash_table->term[entry_index] = tuple;
+
+ erts_schedule_thr_prgr_later_op(table_updater, hash_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+
+ /*
+ * Issue a warning once if the warning limit has been exceeded.
+ */
+
+ if (hash_table->num_entries > WARNING_LIMIT && issued_warning == 0) {
+ static char w[] =
+ "More than " XSTR(WARNING_LIMIT) " persistent terms "
+ "have been created.\n"
+ "It is recommended to avoid creating an excessive number of\n"
+ "persistent terms, as creation and deletion of persistent terms\n"
+ "will be slower as the number of persistent terms increases.\n";
+ issued_warning = 1;
+ erts_send_warning_to_logger_str(BIF_P->group_leader, w);
+ }
+
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+}
+
+BIF_RETTYPE persistent_term_get_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ res = do_get_all(BIF_P, trap_data, res);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+ * Increment the ref counter to prevent an update operation (by put/2
+ * or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, magic_ref, res);
+ }
+}
+
+BIF_RETTYPE persistent_term_get_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ Uint entry_index;
+ Eterm term;
+
+ entry_index = lookup(hash_table, key);
+ term = hash_table->term[entry_index];
+ if (is_boxed(term)) {
+ ASSERT(is_tuple_arity(term, 2));
+ BIF_RET(tuple_val(term)[2]);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* old_table;
+ HashTable* new_table;
+ Uint entry_index;
+ Eterm old_term;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
+ BIF_P, BIF_ARG_1);
+ }
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ entry_index = lookup(old_table, key);
+ old_term = old_table->term[entry_index];
+ if (is_boxed(old_term)) {
+ Uint new_size;
+ HashTable* tmp_table;
+
+ /*
+ * Since we don't use any delete markers, we must rehash
+ * the table when deleting terms to ensure that all terms
+ * can still be reached if there are hash collisions.
+ * We can't rehash in place and it would not be safe to modify
+ * the old table yet, so we will first need a new
+ * temporary table copy of the same size as the old one.
+ */
+
+ ASSERT(is_tuple_arity(old_term, 2));
+ tmp_table = tmp_table_copy(old_table);
+
+ /*
+ * Delete the term from the temporary table. Then copy the
+ * temporary table to a new table, rehashing the entries
+ * while copying.
+ */
+
+ tmp_table->term[entry_index] = NIL;
+ tmp_table->num_entries--;
+ new_size = tmp_table->allocated;
+ if (MUST_SHRINK(tmp_table)) {
+ new_size /= 2;
+ }
+ new_table = copy_table(tmp_table, new_size, 1);
+ erts_free(ERTS_ALC_T_TMP, tmp_table);
+
+ mark_for_deletion(old_table, entry_index);
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ }
+
+ /*
+ * Key is not present. Nothing to do.
+ */
+
+ ASSERT(is_nil(old_term));
+ release_update_permission(0);
+ BIF_RET(am_false);
+}
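The rehash-on-erase requirement comes from the open-addressing probe used by lookup() further down: without delete markers, clearing a slot in the middle of a probe chain would make later entries in that chain unreachable. A standalone sketch of the failure that rebuilding the table avoids, using a toy 8-slot table and an identity "hash" (not part of the patch):

    #include <stdio.h>

    #define EMPTY (-1)

    /* Linear probing over an 8-slot table, mirroring lookup(): start at the
     * hash, pre-increment, mask with size - 1, stop at the key or a free slot.
     * The "hash" is just the key itself to keep the example small. */
    static int probe(const int *slots, int key)
    {
        unsigned idx = (unsigned)key;

        for (;;) {
            idx = (idx + 1) & 7;
            if (slots[idx] == EMPTY || slots[idx] == key)
                return (int)idx;
        }
    }

    int main(void)
    {
        int slots[8] = {EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY};

        slots[probe(slots, 1)] = 1;       /* lands in slot 2 */
        slots[probe(slots, 9)] = 9;       /* collides with 1, lands in slot 3 */

        slots[probe(slots, 1)] = EMPTY;   /* delete 1 in place, no rehash */

        /* The probe for 9 now stops at the emptied slot 2 and never reaches
         * slot 3, which is exactly what rebuilding the table avoids. */
        printf("9 found? %s\n", slots[probe(slots, 9)] == 9 ? "yes" : "no");
        return 0;
    }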
+
+BIF_RETTYPE erts_internal_erase_persistent_terms_0(BIF_ALIST_0)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD0(bif_export[BIF_erts_internal_erase_persistent_terms_0],
+ BIF_P);
+ }
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ old_table->first_to_delete = 0;
+ old_table->num_to_delete = old_table->allocated;
+ new_table = create_initial_table();
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+}
+
+BIF_RETTYPE persistent_term_info_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ trap_data->memory = 0;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+ * Increment the ref counter to prevent an update operation (by put/2
+ * or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_info_export, BIF_P, magic_ref, res);
+ }
+}
+
+Uint
+erts_persistent_term_count(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ return hash_table->num_entries;
+}
+
+void
+erts_init_persistent_dumping(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ ErtsLiteralArea** area_p;
+ Uint i;
+
+ /*
+ * Overwrite the array of Eterms in the current hash table
+ * with pointers to literal areas.
+ */
+
+ erts_persistent_areas = (ErtsLiteralArea **) hash_table->term;
+ erts_num_persistent_areas = hash_table->num_entries;
+ area_p = erts_persistent_areas;
+ for (i = 0; i < hash_table->allocated; i++) {
+ Eterm term = hash_table->term[i];
+
+ if (is_boxed(term)) {
+ *area_p++ = term_to_area(term);
+ }
+ }
+}
+
+/*
+ * Local functions.
+ */
+
+static HashTable*
+create_initial_table(void)
+{
+ HashTable* hash_table;
+ int i;
+
+ hash_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable)+sizeof(Eterm) *
+ (INITIAL_SIZE-1));
+ hash_table->allocated = INITIAL_SIZE;
+ hash_table->num_entries = 0;
+ hash_table->mask = INITIAL_SIZE-1;
+ hash_table->first_to_delete = 0;
+ hash_table->num_to_delete = 0;
+ erts_atomic_init_nob(&hash_table->refc, (erts_aint_t)1);
+ for (i = 0; i < INITIAL_SIZE; i++) {
+ hash_table->term[i] = NIL;
+ }
+ return hash_table;
+}
+
+static BIF_RETTYPE
+persistent_term_get_all_trap(BIF_ALIST_2)
+{
+ TrapData* trap_data;
+ Eterm res = BIF_ARG_2;
+ Uint bump_reds;
+ Binary* mbp;
+
+ ASSERT(is_list(BIF_ARG_2));
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_get_all(BIF_P, trap_data, res);
+ ASSERT(is_list(res));
+ if (trap_data->remaining > 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, BIF_ARG_1, res);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ BIF_RET(res);
+ }
+}
+
+static Eterm
+do_get_all(Process* c_p, TrapData* trap_data, Eterm res)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+ Uint i;
+ Eterm* hp;
+ Uint heap_size;
+ struct copy_term {
+ Uint key_size;
+ Eterm* tuple_ptr;
+ } *copy_data;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ?
+ trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
+
+ copy_data = (struct copy_term *) erts_alloc(ERTS_ALC_T_TMP,
+ remaining *
+ sizeof(struct copy_term));
+ i = 0;
+ heap_size = (2 + 3) * remaining;
+ while (remaining != 0) {
+ Eterm term = hash_table->term[idx];
+ if (is_tuple(term)) {
+ Uint key_size;
+ Eterm* tup_val;
+
+ ASSERT(is_tuple_arity(term, 2));
+ tup_val = tuple_val(term);
+ key_size = size_object(tup_val[1]);
+ copy_data[i].key_size = key_size;
+ copy_data[i].tuple_ptr = tup_val;
+ heap_size += key_size;
+ i++;
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+
+ hp = HAlloc(c_p, heap_size);
+ remaining = i;
+ for (i = 0; i < remaining; i++) {
+ Eterm* tuple_ptr;
+ Uint key_size;
+ Eterm key;
+ Eterm tup;
+
+ tuple_ptr = copy_data[i].tuple_ptr;
+ key_size = copy_data[i].key_size;
+ key = copy_struct(tuple_ptr[1], key_size, &hp, &c_p->off_heap);
+ tup = TUPLE2(hp, key, tuple_ptr[2]);
+ hp += 3;
+ res = CONS(hp, tup, res);
+ hp += 2;
+ }
+ erts_free(ERTS_ALC_T_TMP, copy_data);
+ return res;
+}
+
+static BIF_RETTYPE
+persistent_term_info_trap(BIF_ALIST_1)
+{
+ TrapData* trap_data;
+ Eterm res;
+ Uint bump_reds;
+ Binary* mbp;
+
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining > 0) {
+ ASSERT(res == am_ok);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&persistent_term_info_export, BIF_P, BIF_ARG_1);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ ASSERT(is_map(res));
+ BIF_RET(res);
+ }
+}
+
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+static Eterm
+do_info(Process* c_p, TrapData* trap_data)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ? trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
+ while (remaining != 0) {
+ if (is_boxed(hash_table->term[idx])) {
+ ErtsLiteralArea* area;
+ area = term_to_area(hash_table->term[idx]);
+ trap_data->memory += sizeof(ErtsLiteralArea) +
+ sizeof(Eterm) * (area->end - area->start - 1);
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+ if (trap_data->remaining > 0) {
+ return am_ok; /* Dummy return value */
+ } else {
+ Eterm* hp;
+ Eterm count_term;
+ Eterm memory_term;
+ Eterm res;
+ Uint memory;
+ Uint hsz = MAP_SZ(2);
+
+ memory = sizeof(HashTable) + (trap_data->table->allocated-1) *
+ sizeof(Eterm) + trap_data->memory;
+ (void) erts_bld_uint(NULL, &hsz, hash_table->num_entries);
+ (void) erts_bld_uint(NULL, &hsz, memory);
+ hp = HAlloc(c_p, hsz);
+ count_term = erts_bld_uint(&hp, NULL, hash_table->num_entries);
+ memory_term = erts_bld_uint(&hp, NULL, memory);
+ res = MAP2(hp, am_count, count_term, am_memory, memory_term);
+ return res;
+ }
+}
+
+#undef DECL_AM
+
+static Eterm
+alloc_trap_data(Process* c_p)
+{
+ Binary* mbp = erts_create_magic_binary(sizeof(TrapData),
+ cleanup_trap_data);
+ Eterm* hp;
+
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
+}
+
+static int
+cleanup_trap_data(Binary *bp)
+{
+ TrapData* trap_data = ERTS_MAGIC_BIN_DATA(bp);
+
+ if (trap_data->table) {
+ /*
+ * The process has been killed and is now exiting.
+ * Decrement the reference counter for the table.
+ */
+ dec_table_refc(NULL, trap_data->table);
+ }
+ return 1;
+}
+
+static Uint
+lookup(HashTable* hash_table, Eterm key)
+{
+ Uint mask = hash_table->mask;
+ Eterm* table = hash_table->term;
+ Uint32 idx = make_internal_hash(key, 0);
+ Eterm term;
+
+ do {
+ idx++;
+ term = table[idx & mask];
+ } while (is_boxed(term) && !EQ(key, (tuple_val(term))[1]));
+ return idx & mask;
+}
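lookup() relies on the table size being a power of two: masking with allocated - 1 is then the same as reducing the probe index modulo the size, so probing needs no division, and the loop terminates because the load factor keeps a portion of the slots NIL. A trivial standalone check of the mask-equals-modulo property, not part of the patch:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned size = 8;            /* INITIAL_SIZE; always a power of two */
        unsigned mask = size - 1;
        unsigned idx;

        for (idx = 0; idx < 1000; idx++)
            assert((idx & mask) == (idx % size));

        printf("masking with size - 1 == taking the index modulo size\n");
        return 0;
    }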
+
+static HashTable*
+tmp_table_copy(HashTable* old_table)
+{
+ Uint size = old_table->allocated;
+ HashTable* tmp_table;
+ Uint i;
+
+ tmp_table = (HashTable *) erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (size-1));
+ *tmp_table = *old_table;
+ for (i = 0; i < size; i++) {
+ tmp_table->term[i] = old_table->term[i];
+ }
+ return tmp_table;
+}
+
+static HashTable*
+copy_table(HashTable* old_table, Uint new_size, int rehash)
+{
+ HashTable* new_table;
+ Uint old_size = old_table->allocated;
+ Uint i;
+
+ new_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (new_size-1));
+ if (old_table->allocated == new_size && !rehash) {
+ /*
+ * Same size and no key deleted. Make an exact copy of the table.
+ */
+ *new_table = *old_table;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = old_table->term[i];
+ }
+ } else {
+ /*
+ * The size of the table has changed or an element has been
+ * deleted. We must rehash by inserting all old terms into the
+ * new (empty) table.
+ */
+ new_table->allocated = new_size;
+ new_table->num_entries = old_table->num_entries;
+ new_table->mask = new_size - 1;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = NIL;
+ }
+ for (i = 0; i < old_size; i++) {
+ if (is_tuple(old_table->term[i])) {
+ Eterm key = tuple_val(old_table->term[i])[1];
+ Uint entry_index = lookup(new_table, key);
+ ASSERT(is_nil(new_table->term[entry_index]));
+ new_table->term[entry_index] = old_table->term[i];
+ }
+ }
+ }
+ new_table->first_to_delete = 0;
+ new_table->num_to_delete = 0;
+ erts_atomic_init_nob(&new_table->refc, (erts_aint_t)1);
+ return new_table;
+}
+
+static void
+mark_for_deletion(HashTable* hash_table, Uint entry_index)
+{
+ hash_table->first_to_delete = entry_index;
+ hash_table->num_to_delete = 1;
+}
+
+static ErtsLiteralArea*
+term_to_area(Eterm tuple)
+{
+ ASSERT(is_tuple_arity(tuple, 2));
+ return (ErtsLiteralArea *) (((char *) tuple_val(tuple)) -
+ offsetof(ErtsLiteralArea, start));
+}
+
+static void
+table_updater(void* data)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ new_table = (HashTable *) data;
+ ASSERT(new_table->num_to_delete == 0);
+ erts_atomic_set_nob(&the_hash_table, (erts_aint_t)new_table);
+ append_to_delete_queue(old_table);
+ erts_schedule_thr_prgr_later_op(table_deleter,
+ old_table,
+ &old_table->thr_prog_op);
+ release_update_permission(1);
+}
+
+static void
+table_deleter(void* data)
+{
+ HashTable* old_table = (HashTable *) data;
+
+ dec_table_refc(NULL, old_table);
+}
+
+static void
+dec_table_refc(Process* c_p, HashTable* old_table)
+{
+ erts_aint_t refc = erts_atomic_dec_read_nob(&old_table->refc);
+
+ if (refc == 0) {
+ HashTable* to_delete;
+
+ while ((to_delete = next_to_delete()) != NULL) {
+ delete_table(c_p, to_delete);
+ }
+ }
+}
+
+static void
+delete_table(Process* c_p, HashTable* table)
+{
+ Uint idx = table->first_to_delete;
+ Uint n = table->num_to_delete;
+
+ /*
+ * There are no longer any references to this hash table.
+ *
+ * The literal areas of the terms marked for deletion can now be
+ * released, and the table itself can be deallocated.
+ */
+
+#ifdef DEBUG
+ if (n == 1) {
+ ASSERT(is_tuple_arity(table->term[idx], 2));
+ }
+#endif
+
+ while (n > 0) {
+ Eterm term = table->term[idx];
+
+ if (is_tuple_arity(term, 2)) {
+ if (is_immed(tuple_val(term)[2])) {
+ erts_release_literal_area(term_to_area(term));
+ } else {
+ erts_queue_release_literals(c_p, term_to_area(term));
+ }
+ }
+ idx++, n--;
+ }
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM, table);
+}
+
+/*
+ * Caller *must* yield if this function returns 0.
+ */
+
+static int
+try_seize_update_permission(Process* c_p)
+{
+ int success;
+
+ ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
+ ASSERT(c_p != NULL);
+
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != c_p);
+ success = (updater_process == NULL);
+ if (success) {
+ updater_process = c_p;
+ } else {
+ struct update_queue_item* qitem;
+ qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
+ qitem->p = c_p;
+ erts_proc_inc_refc(c_p);
+ qitem->next = update_queue;
+ update_queue = qitem;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+ return success;
+}
+
+static void
+release_update_permission(int release_updater)
+{
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != NULL);
+
+ if (release_updater) {
+ erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(updater_process)) {
+ erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ updater_process = NULL;
+
+ while (update_queue != NULL) { /* Unleash the entire herd */
+ struct update_queue_item* qitem = update_queue;
+ erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(qitem->p)) {
+ erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ update_queue = qitem->next;
+ erts_proc_dec_refc(qitem->p);
+ erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+}
+
+static void
+suspend_updater(Process* c_p)
+{
+#ifdef DEBUG
+ ASSERT(c_p != NULL);
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process == c_p);
+ erts_mtx_unlock(&update_table_permission_mtx);
+#endif
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+}
+
+static void
+append_to_delete_queue(HashTable* table)
+{
+ erts_mtx_lock(&delete_queue_mtx);
+ table->delete_next = NULL;
+ *delete_queue_tail = table;
+ delete_queue_tail = &table->delete_next;
+ erts_mtx_unlock(&delete_queue_mtx);
+}
+
+static HashTable*
+next_to_delete(void)
+{
+ HashTable* table;
+
+ erts_mtx_lock(&delete_queue_mtx);
+ table = delete_queue_head;
+ if (table) {
+ if (erts_atomic_read_nob(&table->refc)) {
+ /*
+ * This hash table is still referenced. Hash tables
+ * must be deleted in order, so we return a NULL
+ * pointer.
+ */
+ table = NULL;
+ } else {
+ /*
+ * Remove the first hash table from the queue.
+ */
+ delete_queue_head = table->delete_next;
+ if (delete_queue_head == NULL) {
+ delete_queue_tail = &delete_queue_head;
+ }
+ }
+ }
+ erts_mtx_unlock(&delete_queue_mtx);
+ return table;
+}
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
index 40b70667c0..944788c67c 100644
--- a/erts/emulator/beam/erl_bif_unique.h
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -242,11 +242,11 @@ erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
Uint32 num2[ERTS_REF_NUMBERS])
{
if (num1[2] != num2[2])
- return (int) ((Sint64) num1[2] - (Sint64) num2[2]);
+ return num1[2] > num2[2] ? 1 : -1;
if (num1[1] != num2[1])
- return (int) ((Sint64) num1[1] - (Sint64) num2[1]);
+ return num1[1] > num2[1] ? 1 : -1;
if (num1[0] != num2[0])
- return (int) ((Sint64) num1[0] - (Sint64) num2[0]);
+ return num1[0] > num2[0] ? 1 : -1;
return 0;
}
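The rewritten comparison avoids narrowing a 64-bit difference of two unsigned 32-bit reference words into an int, which can flip the sign of the result for widely separated values. A standalone illustration of the failure mode (the narrowing conversion is implementation-defined; on common two's-complement platforms it wraps):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t a = 0x80000000u;   /* a is clearly larger than b */
        uint32_t b = 0x00000000u;

        /* The difference, 2^31, does not fit in an int; on common platforms the
         * conversion wraps to a negative value, inverting the comparison. */
        int old_style = (int)((int64_t)a - (int64_t)b);
        int new_style = a > b ? 1 : -1;

        printf("old: %d  new: %d\n", old_style, new_style);
        return 0;
    }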
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
index 086275fbe5..20299ff604 100644
--- a/erts/emulator/beam/erl_dirty_bif.tab
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -59,8 +59,6 @@ dirty-cpu erts_debug:lcnt_clear/0
dirty-cpu-test erlang:'++'/2
dirty-cpu-test erlang:append/2
-dirty-cpu-test erlang:'--'/2
-dirty-cpu-test erlang:subtract/2
dirty-cpu-test erlang:iolist_size/1
dirty-cpu-test erlang:make_tuple/2
dirty-cpu-test erlang:make_tuple/3
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index a65dbbf42b..47dd115c82 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1133,9 +1133,28 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
p->arg_reg, p->arity, fcalls,
ygen_usage);
+ if (ERTS_PROC_IS_EXITING(p)) {
+ return 0;
+ }
ASSERT(!(p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)));
+ if (MAX_HEAP_SIZE_GET(p)) {
+ Uint new_heap_size;
+ Uint old_heap_size;
+ Uint total_heap_size;
+
+ new_heap_size = HEAP_END(p) - HEAP_START(p);
+ old_heap_size = erts_next_heap_size(lit_size, 0);
+ total_heap_size = new_heap_size + old_heap_size;
+ if (MAX_HEAP_SIZE_GET(p) < total_heap_size &&
+ reached_max_heap_size(p, total_heap_size,
+ new_heap_size, old_heap_size)) {
+ erts_set_self_exiting(p, am_killed);
+ return 0;
+ }
+ }
+
/*
* Set GC state.
*/
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 57c6c10c7f..f687dcf335 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -354,6 +354,7 @@ erl_init(int ncpu,
erts_init_bif();
erts_init_bif_chksum();
erts_init_bif_binary();
+ erts_init_bif_persistent_term();
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
erts_init_external();
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 463ae898a3..1416c5f96c 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -97,6 +97,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_btm", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
+ { "update_persistent_term_permission", NULL },
+ { "persistent_term_delete_permission", NULL },
{ "code_write_permission", NULL },
{ "purge_state", NULL },
{ "proc_status", "pid" },
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 243db4c734..ac5054ea10 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -58,6 +58,7 @@ static void dump_externally(fmtfn_t to, void *to_arg, Eterm term);
static void mark_literal(Eterm* ptr);
static void init_literal_areas(void);
static void dump_literals(fmtfn_t to, void *to_arg);
+static void dump_persistent_terms(fmtfn_t to, void *to_arg);
static void dump_module_literals(fmtfn_t to, void *to_arg,
ErtsLiteralArea* lit_area);
@@ -74,6 +75,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
all_binaries = NULL;
init_literal_areas();
+ erts_init_persistent_dumping();
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
@@ -93,6 +95,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
}
}
+ dump_persistent_terms(to, to_arg);
dump_literals(to, to_arg);
dump_binaries(to, to_arg, all_binaries);
}
@@ -775,6 +778,9 @@ init_literal_areas(void)
qsort(lit_areas, num_lit_areas, sizeof(ErtsLiteralArea *),
compare_areas);
+ qsort(erts_persistent_areas, erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea *), compare_areas);
+
erts_runlock_old_code(code_ix);
}
@@ -796,6 +802,13 @@ static void mark_literal(Eterm* ptr)
ap = bsearch(ptr, lit_areas, num_lit_areas, sizeof(ErtsLiteralArea*),
search_areas);
+ if (ap == 0) {
+ ap = bsearch(ptr, erts_persistent_areas,
+ erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea*),
+ search_areas);
+ }
+
/*
* If the literal was created by native code, this search will not
@@ -807,12 +820,12 @@ static void mark_literal(Eterm* ptr)
}
}
-
static void
dump_literals(fmtfn_t to, void *to_arg)
{
ErtsCodeIndex code_ix;
int i;
+ Uint idx;
code_ix = erts_active_code_ix();
erts_rlock_old_code(code_ix);
@@ -825,6 +838,28 @@ dump_literals(fmtfn_t to, void *to_arg)
}
erts_runlock_old_code(code_ix);
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ dump_module_literals(to, to_arg, erts_persistent_areas[idx]);
+ }
+}
+
+static void
+dump_persistent_terms(fmtfn_t to, void *to_arg)
+{
+ Uint idx;
+
+ erts_print(to, to_arg, "=persistent_terms\n");
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ ErtsLiteralArea* ap = erts_persistent_areas[idx];
+ Eterm tuple = make_tuple(ap->start);
+ Eterm* tup_val = tuple_val(tuple);
+
+ dump_element(to, to_arg, tup_val[1]);
+ erts_putc(to, to_arg, '|');
+ dump_element_nl(to, to_arg, tup_val[2]);
+ }
}
static void
@@ -963,7 +998,8 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)
}
erts_putc(to, to_arg, '\n');
}
- } else if (is_export_header(w)) {
+ } else {
+ /* Dump everything else in the external format */
dump_externally(to, to_arg, term);
erts_putc(to, to_arg, '\n');
}
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index b3bfa69052..880febba8b 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -22,6 +22,7 @@
#define ERL_UTILS_H__
#include "sys.h"
+#include "atom.h"
#include "erl_printf.h"
struct process;
@@ -112,10 +113,12 @@ int eq(Eterm, Eterm);
#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-int erts_cmp_atoms(Eterm a, Eterm b);
-Sint erts_cmp(Eterm, Eterm, int, int);
-Sint erts_cmp_compound(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE Sint erts_cmp(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b);
+
Sint cmp(Eterm a, Eterm b);
+Sint erts_cmp_compound(Eterm, Eterm, int, int);
+
#define CMP(A,B) erts_cmp(A,B,0,0)
#define CMP_TERM(A,B) erts_cmp(A,B,1,0)
#define CMP_EQ_ONLY(A,B) erts_cmp(A,B,0,1)
@@ -150,4 +153,56 @@ Sint cmp(Eterm a, Eterm b);
if (erts_cmp_compound(X,Y,0,EqOnly) Op 0) { Action; }; \
}
+#define erts_float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b) {
+ Atom *aa = atom_tab(atom_val(a));
+ Atom *bb = atom_tab(atom_val(b));
+
+ byte *name_a, *name_b;
+ int len_a, len_b, diff;
+
+ diff = aa->ord0 - bb->ord0;
+
+ if (diff != 0) {
+ return diff;
+ }
+
+ name_a = &aa->name[3];
+ name_b = &bb->name[3];
+ len_a = aa->len-3;
+ len_b = bb->len-3;
+
+ if (len_a > 0 && len_b > 0) {
+ diff = sys_memcmp(name_a, name_b, MIN(len_a, len_b));
+
+ if (diff != 0) {
+ return diff;
+ }
+ }
+
+ return len_a - len_b;
+}
+
+ERTS_GLB_INLINE Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only) {
+ if (is_atom(a) && is_atom(b)) {
+ return erts_cmp_atoms(a, b);
+ } else if (is_both_small(a, b)) {
+ return (signed_val(a) - signed_val(b));
+ } else if (is_float(a) && is_float(b)) {
+ FloatDef af, bf;
+
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
+
+ return erts_float_comp(af.fd, bf.fd);
+ }
+
+ return erts_cmp_compound(a,b,exact,eq_only);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 621ba108ba..9a66e491f3 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1953,7 +1953,8 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
#define RETURN_STATE() \
do { \
- hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE+3); \
+ static const int TUPLE2_SIZE = 2 + 1; \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + TUPLE2_SIZE); \
c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
res = TUPLE2(hp, Term, c_term); \
BUMP_ALL_REDS(p); \
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 21ae205237..0631404599 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -906,6 +906,8 @@ typedef struct ErtsLiteralArea_ {
Eterm start[1]; /* beginning of area */
} ErtsLiteralArea;
+void erts_queue_release_literals(Process *c_p, ErtsLiteralArea* literals);
+
#define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \
(sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1))
@@ -1001,6 +1003,7 @@ typedef struct {
Uint literal_size;
Eterm *lit_purge_ptr;
Uint lit_purge_sz;
+ int copy_literals;
} erts_shcopy_t;
#define INITIALIZE_SHCOPY(info) \
@@ -1010,6 +1013,7 @@ typedef struct {
info.bitstore_start = info.bitstore_default; \
info.shtable_start = info.shtable_default; \
info.literal_size = 0; \
+ info.copy_literals = 0; \
if (larea__) { \
info.lit_purge_ptr = &larea__->start[0]; \
info.lit_purge_sz = larea__->end - info.lit_purge_ptr; \
@@ -1238,6 +1242,13 @@ Sint erts_re_set_loop_limit(Sint limit);
void erts_init_bif_binary(void);
Sint erts_binary_set_loop_limit(Sint limit);
+/* erl_bif_persistent.c */
+void erts_init_bif_persistent_term(void);
+Uint erts_persistent_term_count(void);
+void erts_init_persistent_dumping(void);
+extern ErtsLiteralArea** erts_persistent_areas;
+extern Uint erts_num_persistent_areas;
+
/* external.c */
void erts_init_external(void);
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index bb22548587..869a575cb4 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -325,6 +325,7 @@ typedef long Sint erts_align_attribute(sizeof(long));
#define UWORD_CONSTANT(Const) Const##UL
#define ERTS_UWORD_MAX ULONG_MAX
#define ERTS_SWORD_MAX LONG_MAX
+#define ERTS_SWORD_MIN LONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
@@ -335,6 +336,7 @@ typedef int Sint erts_align_attribute(sizeof(int));
#define UWORD_CONSTANT(Const) Const##U
#define ERTS_UWORD_MAX UINT_MAX
#define ERTS_SWORD_MAX INT_MAX
+#define ERTS_SWORD_MIN INT_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
@@ -345,6 +347,7 @@ typedef long long Sint erts_align_attribute(sizeof(long long));
#define UWORD_CONSTANT(Const) Const##ULL
#define ERTS_UWORD_MAX ULLONG_MAX
#define ERTS_SWORD_MAX LLONG_MAX
+#define ERTS_SWORD_MIN LLONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG
#if defined(__WIN32__)
#define ErtsStrToSint _strtoi64
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 08f8ca9788..d81bd89a48 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -2615,27 +2615,6 @@ not_equal:
}
-/*
- * Lexically compare two strings of bytes (string s1 length l1 and s2 l2).
- *
- * s1 < s2 return -1
- * s1 = s2 return 0
- * s1 > s2 return +1
- */
-static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
-{
- int i;
- i = 0;
- while((i < l1) && (i < l2)) {
- if (s1[i] < s2[i]) return(-1);
- if (s1[i] > s2[i]) return(1);
- i++;
- }
- if (l1 < l2) return(-1);
- if (l1 > l2) return(1);
- return(0);
-}
-
/*
* Compare objects.
@@ -2649,20 +2628,6 @@ static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
*
*/
-
-#define float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
-
-int erts_cmp_atoms(Eterm a, Eterm b)
-{
- Atom *aa = atom_tab(atom_val(a));
- Atom *bb = atom_tab(atom_val(b));
- int diff = aa->ord0 - bb->ord0;
- if (diff)
- return diff;
- return cmpbytes(aa->name+3, aa->len-3,
- bb->name+3, bb->len-3);
-}
-
/* cmp(Eterm a, Eterm b)
* For compatibility with HiPE - arith-based compare.
*/
@@ -2673,22 +2638,6 @@ Sint cmp(Eterm a, Eterm b)
Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
-Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only)
-{
- if (is_atom(a) && is_atom(b)) {
- return erts_cmp_atoms(a, b);
- } else if (is_both_small(a, b)) {
- return (signed_val(a) - signed_val(b));
- } else if (is_float(a) && is_float(b)) {
- FloatDef af, bf;
- GET_DOUBLE(a, af);
- GET_DOUBLE(b, bf);
- return float_comp(af.fd, bf.fd);
- }
- return erts_cmp_compound(a,b,exact,eq_only);
-}
-
-
/* erts_cmp(Eterm a, Eterm b, int exact)
* exact = 1 -> term-based compare
* exact = 0 -> arith-based compare
@@ -2985,7 +2934,7 @@ tailrecur_ne:
GET_DOUBLE(a, af);
GET_DOUBLE(b, bf);
- ON_CMP_GOTO(float_comp(af.fd, bf.fd));
+ ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd));
}
case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
@@ -3022,10 +2971,7 @@ tailrecur_ne:
ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
Sint diff;
- diff = cmpbytes(atom_tab(atom_val(f1->fe->module))->name,
- atom_tab(atom_val(f1->fe->module))->len,
- atom_tab(atom_val(f2->fe->module))->name,
- atom_tab(atom_val(f2->fe->module))->len);
+ diff = erts_cmp_atoms((f1->fe)->module, (f2->fe)->module);
if (diff != 0) {
RETURN_NEQ(diff);
}
@@ -3219,7 +3165,7 @@ tailrecur_ne:
if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f1.fd = signed_val(aw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f2.fd > (double) (MAX_SMALL + 1)) {
@@ -3266,7 +3212,7 @@ tailrecur_ne:
if (big_to_double(aw, &f1.fd) < 0) {
j = big_sign(aw) ? -1 : 1;
} else {
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
} else {
big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
@@ -3282,7 +3228,7 @@ tailrecur_ne:
if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f2.fd = signed_val(bw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f1.fd > (double) (MAX_SMALL + 1)) {
diff --git a/erts/emulator/sys/unix/sys_uds.c b/erts/emulator/sys/unix/sys_uds.c
index 39a4866065..c9f73622ba 100644
--- a/erts/emulator/sys/unix/sys_uds.c
+++ b/erts/emulator/sys/unix/sys_uds.c
@@ -88,8 +88,9 @@ sys_uds_readv(int fd, struct iovec *iov, size_t iov_len,
if((msg.msg_flags & MSG_CTRUNC) == MSG_CTRUNC)
{
/* We assume that we have given enough space for any header
- that are sent to us. So the only remaining reason to get
- this flag set is if the caller has run out of file descriptors.
+ that is sent to us. So the only remaining reasons to get
+ this flag set are if the caller has run out of file descriptors
+ or an SELinux policy prunes the response (e.g. O_APPEND on STDERR).
*/
errno = EMFILE;
return -1;
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index bf00de2204..6a064ec8d4 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -33,6 +33,7 @@ MODULES= \
after_SUITE \
alloc_SUITE \
async_ports_SUITE \
+ atomics_SUITE \
beam_SUITE \
beam_literals_SUITE \
bif_SUITE \
@@ -50,6 +51,7 @@ MODULES= \
call_trace_SUITE \
code_SUITE \
code_parallel_load_SUITE \
+ counters_SUITE \
crypto_SUITE \
ddll_SUITE \
decode_packet_SUITE \
@@ -92,6 +94,7 @@ MODULES= \
port_SUITE \
port_bif_SUITE \
prim_eval_SUITE \
+ persistent_term_SUITE \
process_SUITE \
pseudoknot_SUITE \
receive_SUITE \
diff --git a/erts/emulator/test/atomics_SUITE.erl b/erts/emulator/test/atomics_SUITE.erl
new file mode 100644
index 0000000000..8c42354770
--- /dev/null
+++ b/erts/emulator/test/atomics_SUITE.erl
@@ -0,0 +1,147 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(atomics_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [signed, unsigned, bad, signed_limits, unsigned_limits].
+
+signed(Config) when is_list(Config) ->
+ Size = 10,
+ Ref = atomics:new(Size,[]),
+ #{size:=Size, memory:=Memory} = atomics:info(Ref),
+ {_,true} = {Memory, Memory > Size*8},
+ {_,true} = {Memory, Memory < Size*max_atomic_sz() + 100},
+ [signed_do(Ref, Ix) || Ix <- lists:seq(1, Size)],
+ ok.
+
+signed_do(Ref, Ix) ->
+ 0 = atomics:get(Ref, Ix),
+ ok = atomics:put(Ref, Ix, 3),
+ ok = atomics:add(Ref, Ix, 14),
+ 17 = atomics:get(Ref, Ix),
+ 20 = atomics:add_get(Ref, Ix, 3),
+ -3 = atomics:add_get(Ref, Ix, -23),
+ 17 = atomics:add_get(Ref, Ix, 20),
+ ok = atomics:sub(Ref, Ix, 4),
+ 13 = atomics:get(Ref, Ix),
+ -7 = atomics:sub_get(Ref, Ix, 20),
+ 3 = atomics:sub_get(Ref, Ix, -10),
+ 3 = atomics:exchange(Ref, Ix, 666),
+ ok = atomics:compare_exchange(Ref, Ix, 666, 777),
+ 777 = atomics:compare_exchange(Ref, Ix, 666, -666),
+ ok.
+
+unsigned(Config) when is_list(Config) ->
+ Size = 10,
+ Ref = atomics:new(Size,[{signed, false}]),
+ #{size:=Size, memory:=Memory} = atomics:info(Ref),
+ true = Memory > Size*8,
+ true = Memory < Size*max_atomic_sz() + 100,
+ [unsigned_do(Ref, Ix) || Ix <- lists:seq(1, Size)],
+ ok.
+
+unsigned_do(Ref, Ix) ->
+ 0 = atomics:get(Ref, Ix),
+ ok = atomics:put(Ref, Ix, 3),
+ ok = atomics:add(Ref, Ix, 14),
+ 17 = atomics:get(Ref, Ix),
+ 20 = atomics:add_get(Ref, Ix, 3),
+ ok = atomics:sub(Ref, Ix, 7),
+ 13 = atomics:get(Ref, Ix),
+ 3 = atomics:sub_get(Ref, Ix, 10),
+ 3 = atomics:exchange(Ref, Ix, 666),
+ ok = atomics:compare_exchange(Ref, Ix, 666, 777),
+ 777 = atomics:compare_exchange(Ref, Ix, 666, 888),
+ ok.
+
+bad(Config) when is_list(Config) ->
+ {'EXIT',{badarg,_}} = (catch atomics:new(0,[])),
+ {'EXIT',{badarg,_}} = (catch atomics:new(10,[bad])),
+ {'EXIT',{badarg,_}} = (catch atomics:new(10,[{signed,bad}])),
+ {'EXIT',{badarg,_}} = (catch atomics:new(10,[{signed,true}, bad])),
+ {'EXIT',{badarg,_}} = (catch atomics:new(10,[{signed,false} | bad])),
+ Ref = atomics:new(10,[]),
+ {'EXIT',{badarg,_}} = (catch atomics:get(1742, 7)),
+ {'EXIT',{badarg,_}} = (catch atomics:get(make_ref(), 7)),
+ {'EXIT',{badarg,_}} = (catch atomics:get(Ref, -1)),
+ {'EXIT',{badarg,_}} = (catch atomics:get(Ref, 0)),
+ {'EXIT',{badarg,_}} = (catch atomics:get(Ref, 11)),
+ {'EXIT',{badarg,_}} = (catch atomics:get(Ref, 7.0)),
+ ok.
+
+
+signed_limits(Config) when is_list(Config) ->
+ Bits = 64,
+ Max = (1 bsl (Bits-1)) - 1,
+ Min = -(1 bsl (Bits-1)),
+
+ Ref = atomics:new(1,[{signed, true}]),
+ #{max:=Max, min:=Min} = atomics:info(Ref),
+ 0 = atomics:get(Ref, 1),
+ ok = atomics:add(Ref, 1, Max),
+ Min = atomics:add_get(Ref, 1, 1),
+ Max = atomics:sub_get(Ref, 1, 1),
+
+ IncrMax = (Max bsl 1) bor 1,
+ ok = atomics:put(Ref, 1, 0),
+ ok = atomics:add(Ref, 1, IncrMax),
+ -1 = atomics:get(Ref, 1),
+ {'EXIT',{badarg,_}} = (catch atomics:add(Ref, 1, IncrMax+1)),
+ {'EXIT',{badarg,_}} = (catch atomics:add(Ref, 1, Min-1)),
+
+ ok.
+
+unsigned_limits(Config) when is_list(Config) ->
+ Bits = 64,
+ Max = (1 bsl Bits) - 1,
+ Min = 0,
+
+ Ref = atomics:new(1,[{signed,false}]),
+ #{max:=Max, min:=Min} = atomics:info(Ref),
+ 0 = atomics:get(Ref, 1),
+ ok = atomics:add(Ref, 1, Max),
+ Min = atomics:add_get(Ref, 1, 1),
+ Max = atomics:sub_get(Ref, 1, 1),
+
+ {'EXIT',{badarg,_}} = (catch atomics:add(Ref, 1, Max+1)),
+ IncrMin = -(1 bsl (Bits-1)),
+ ok = atomics:put(Ref, 1, -IncrMin),
+ ok = atomics:add(Ref, 1, IncrMin),
+ 0 = atomics:get(Ref, 1),
+ {'EXIT',{badarg,_}} = (catch atomics:add(Ref, 1, IncrMin-1)),
+
+ ok.
+
+max_atomic_sz() ->
+ case erlang:system_info({wordsize, external}) of
+ 4 -> 16;
+ 8 ->
+ EI = erlang:system_info(ethread_info),
+ case lists:keyfind("64-bit native atomics", 1, EI) of
+ {_, "no", _} -> 16;
+ _ -> 8
+ end
+ end.
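
The signed_limits/1 and unsigned_limits/1 cases above pin down the 64-bit wrap-around semantics of the new module. As a hedged illustration (not part of the patch), the same property can be checked from a shell:

    Ref = atomics:new(1, [{signed, true}]),
    #{min := Min, max := Max} = atomics:info(Ref),  %% -(1 bsl 63) and (1 bsl 63) - 1
    ok = atomics:put(Ref, 1, Max),
    Min = atomics:add_get(Ref, 1, 1),               %% overflow wraps around to Min
    Max = atomics:sub_get(Ref, 1, 1).               %% underflow wraps back to Max
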
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index 9c6dc3ff83..0444ba4f89 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -28,7 +28,7 @@
fake_literals/1,
false_dependency/1,coverage/1,fun_confusion/1,
t_copy_literals/1, t_copy_literals_frags/1,
- erl_544/1]).
+ erl_544/1, max_heap_size/1]).
-define(line_trace, 1).
-include_lib("common_test/include/ct.hrl").
@@ -43,7 +43,7 @@ all() ->
constant_pools, constant_refc_binaries, fake_literals,
false_dependency,
coverage, fun_confusion, t_copy_literals, t_copy_literals_frags,
- erl_544].
+ erl_544, max_heap_size].
init_per_suite(Config) ->
erts_debug:set_internal_state(available_internal_state, true),
@@ -968,6 +968,39 @@ erl_544(Config) when is_list(Config) ->
{skipped, "Only run when native file name encoding is utf8"}
end.
+%% Test that the copying of literals to a process during purging of
+%% literals will cause the process to be killed if the max heap size
+%% is exceeded.
+max_heap_size(_Config) ->
+ Mod = ?FUNCTION_NAME,
+ Value = [I || I <- lists:seq(1, 5000)],
+ Code = gen_lit(Mod, [{term,Value}]),
+ {module,Mod} = erlang:load_module(Mod, Code),
+ SpawnOpts = [monitor,
+ {max_heap_size,
+ #{size=>1024,
+ kill=>true,
+ error_logger=>true}}],
+ {Pid,Ref} = spawn_opt(fun() ->
+ max_heap_size_proc(Mod)
+ end, SpawnOpts),
+ receive
+ {'DOWN',Ref,process,Pid,Reason} ->
+ killed = Reason;
+ Other ->
+ ct:fail({unexpected_message,Other})
+ after 10000 ->
+ ct:fail({process_did_not_die, Pid, erlang:process_info(Pid)})
+ end.
+
+max_heap_size_proc(Mod) ->
+ Value = Mod:term(),
+ code:delete(Mod),
+ code:purge(Mod),
+ receive
+ _ -> Value
+ end.
+
%% Utilities.
make_sub_binary(Bin) when is_binary(Bin) ->
diff --git a/erts/emulator/test/counters_SUITE.erl b/erts/emulator/test/counters_SUITE.erl
new file mode 100644
index 0000000000..7de164096b
--- /dev/null
+++ b/erts/emulator/test/counters_SUITE.erl
@@ -0,0 +1,112 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(counters_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [basic, bad, limits].
+
+basic(Config) when is_list(Config) ->
+ Size = 10,
+ [begin
+ Ref = counters:new(Size,[Type]),
+ #{size:=Size, memory:=Memory} = counters:info(Ref),
+ check_memory(Type, Memory, Size),
+ [basic_do(Ref, Ix) || Ix <- lists:seq(1, Size)]
+ end
+ || Type <- [atomics, write_concurrency]],
+ ok.
+
+basic_do(Ref, Ix) ->
+ 0 = counters:get(Ref, Ix),
+ ok = counters:add(Ref, Ix, 3),
+ 3 = counters:get(Ref, Ix),
+ ok = counters:add(Ref, Ix, 14),
+ 17 = counters:get(Ref, Ix),
+ ok = counters:add(Ref, Ix, -20),
+ -3 = counters:get(Ref, Ix),
+ ok = counters:add(Ref, Ix, 100),
+ 97 = counters:get(Ref, Ix),
+ ok = counters:sub(Ref, Ix, 20),
+ 77 = counters:get(Ref, Ix),
+ ok = counters:sub(Ref, Ix, -10),
+ 87 = counters:get(Ref, Ix),
+ ok.
+
+check_memory(atomics, Memory, Size) ->
+ {_,true} = {Memory, Memory > Size*8},
+ {_,true} = {Memory, Memory < Size*max_atomic_sz() + 100};
+check_memory(write_concurrency, Memory, Size) ->
+ NScheds = erlang:system_info(schedulers),
+ {_,true} = {Memory, Memory > NScheds*Size*8},
+ {_,true} = {Memory, Memory < NScheds*(Size+7)*max_atomic_sz() + 100}.
+
+max_atomic_sz() ->
+ case erlang:system_info({wordsize, external}) of
+ 4 -> 16;
+ 8 ->
+ EI = erlang:system_info(ethread_info),
+ case lists:keyfind("64-bit native atomics", 1, EI) of
+ {_, "no", _} -> 16;
+ _ -> 8
+ end
+ end.
+
+bad(Config) when is_list(Config) ->
+ {'EXIT',{badarg,_}} = (catch counters:new(0,[])),
+ {'EXIT',{badarg,_}} = (catch counters:new(10,[bad])),
+ {'EXIT',{badarg,_}} = (catch counters:new(10,[atomic, bad])),
+ {'EXIT',{badarg,_}} = (catch counters:new(10,[write_concurrency | bad])),
+ Ref = counters:new(10,[]),
+ {'EXIT',{badarg,_}} = (catch counters:get(1742, 7)),
+ {'EXIT',{badarg,_}} = (catch counters:get(make_ref(), 7)),
+ {'EXIT',{badarg,_}} = (catch counters:get(Ref, -1)),
+ {'EXIT',{badarg,_}} = (catch counters:get(Ref, 0)),
+ {'EXIT',{badarg,_}} = (catch counters:get(Ref, 11)),
+ {'EXIT',{badarg,_}} = (catch counters:get(Ref, 7.0)),
+ ok.
+
+
+limits(Config) when is_list(Config) ->
+ Bits = 64,
+ Max = (1 bsl (Bits-1)) - 1,
+ Min = -(1 bsl (Bits-1)),
+
+ Ref = counters:new(1,[]),
+ 0 = counters:get(Ref, 1),
+ ok = counters:add(Ref, 1, Max),
+ ok = counters:add(Ref, 1, 1),
+ Min = counters:get(Ref, 1),
+ ok = counters:sub(Ref, 1, 1),
+ Max = counters:get(Ref, 1),
+
+ IncrMax = (Max bsl 1) bor 1,
+ ok = counters:sub(Ref, 1, counters:get(Ref, 1)),
+ ok = counters:add(Ref, 1, IncrMax),
+ -1 = counters:get(Ref, 1),
+ {'EXIT',{badarg,_}} = (catch counters:add(Ref, 1, IncrMax+1)),
+ {'EXIT',{badarg,_}} = (catch counters:add(Ref, 1, Min-1)),
+
+ ok.
diff --git a/erts/emulator/test/persistent_term_SUITE.erl b/erts/emulator/test/persistent_term_SUITE.erl
new file mode 100644
index 0000000000..58cd3276b0
--- /dev/null
+++ b/erts/emulator/test/persistent_term_SUITE.erl
@@ -0,0 +1,614 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(persistent_term_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-export([all/0,suite/0,
+ basic/1,purging/1,sharing/1,get_trapping/1,
+ info/1,info_trapping/1,killed_while_trapping/1,
+ off_heap_values/1,keys/1,collisions/1,
+ init_restart/1]).
+
+%%
+-export([test_init_restart_cmd/1]).
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,10}}].
+
+all() ->
+ [basic,purging,sharing,get_trapping,info,info_trapping,
+ killed_while_trapping,off_heap_values,keys,collisions,
+ init_restart].
+
+basic(_Config) ->
+ Chk = chk(),
+ N = 777,
+ Seq = lists:seq(1, N),
+ par(2, N, Seq),
+ seq(3, Seq),
+ seq(3, Seq), %Same values.
+ _ = [begin
+ Key = {?MODULE,{key,I}},
+ true = persistent_term:erase(Key),
+ false = persistent_term:erase(Key),
+ {'EXIT',{badarg,_}} = (catch persistent_term:get(Key))
+ end || I <- Seq],
+ [] = [P || {{?MODULE,_},_}=P <- persistent_term:get()],
+ chk(Chk).
+
+par(C, N, Seq) ->
+ _ = [spawn_link(fun() ->
+ ok = persistent_term:put({?MODULE,{key,I}},
+ {value,C*I})
+ end) || I <- Seq],
+ Result = wait(N),
+ _ = [begin
+ Double = C*I,
+ {{?MODULE,{key,I}},{value,Double}} = Res
+ end || {I,Res} <- lists:zip(Seq, Result)],
+ ok.
+
+seq(C, Seq) ->
+ _ = [ok = persistent_term:put({?MODULE,{key,I}}, {value,C*I}) ||
+ I <- Seq],
+ All = persistent_term:get(),
+ All = [P || {{?MODULE,_},_}=P <- persistent_term:get()],
+ All = [{Key,persistent_term:get(Key)} || {Key,_} <- All],
+ Result = lists:sort(All),
+ _ = [begin
+ Double = C*I,
+ {{?MODULE,{key,I}},{value,Double}} = Res
+ end || {I,Res} <- lists:zip(Seq, Result)],
+ ok.
+
+wait(N) ->
+ All = [P || {{?MODULE,_},_}=P <- persistent_term:get()],
+ case length(All) of
+ N ->
+ All = [{Key,persistent_term:get(Key)} || {Key,_} <- All],
+ lists:sort(All);
+ _ ->
+ receive after 10 -> ok end,
+ wait(N)
+ end.
+
+%% Make sure that terms that have been erased are copied into all
+%% processes that still hold a pointer to them.
+
+purging(_Config) ->
+ Chk = chk(),
+ do_purging(fun(K) -> persistent_term:put(K, {?MODULE,new}) end,
+ replaced),
+ do_purging(fun persistent_term:erase/1, erased),
+ chk(Chk).
+
+do_purging(Eraser, Type) ->
+ Parent = self(),
+ Key = {?MODULE,?FUNCTION_NAME},
+ ok = persistent_term:put(Key, {term,[<<"abc",0:777/unit:8>>]}),
+ Ps0 = [spawn_monitor(fun() -> purging_tester(Parent, Key) end) ||
+ _ <- lists:seq(1, 50)],
+ Ps = maps:from_list(Ps0),
+ purging_recv(gotten, Ps),
+ Eraser(Key),
+ _ = [P ! {Parent,Type} || P <- maps:keys(Ps)],
+ purging_wait(Ps).
+
+purging_recv(Tag, Ps) when map_size(Ps) > 0 ->
+ receive
+ {Pid,Tag} ->
+ true = is_map_key(Pid, Ps),
+ purging_recv(Tag, maps:remove(Pid, Ps))
+ end;
+purging_recv(_, _) -> ok.
+
+purging_wait(Ps) when map_size(Ps) > 0 ->
+ receive
+ {'DOWN',Ref,process,Pid,Reason} ->
+ normal = Reason,
+ Ref = map_get(Pid, Ps),
+ purging_wait(maps:remove(Pid, Ps))
+ end;
+purging_wait(_) -> ok.
+
+purging_tester(Parent, Key) ->
+ Term = persistent_term:get(Key),
+ purging_check_term(Term),
+ 0 = erts_debug:size_shared(Term),
+ Parent ! {self(),gotten},
+ receive
+ {Parent,erased} ->
+ {'EXIT',{badarg,_}} = (catch persistent_term:get(Key)),
+ purging_tester_1(Term);
+ {Parent,replaced} ->
+ {?MODULE,new} = persistent_term:get(Key),
+ purging_tester_1(Term)
+ end.
+
+%% Wait for the term to be copied into this process.
+purging_tester_1(Term) ->
+ purging_check_term(Term),
+ receive after 1 -> ok end,
+ case erts_debug:size_shared(Term) of
+ 0 ->
+ purging_tester_1(Term);
+ Size ->
+ %% The term has been copied into this process.
+ purging_check_term(Term),
+ Size = erts_debug:size(Term)
+ end.
+
+purging_check_term({term,[<<"abc",0:777/unit:8>>]}) ->
+ ok.
+
+%% Test that sharing is preserved when storing terms.
+
+sharing(_Config) ->
+ Chk = chk(),
+ Depth = 10,
+ Size = 2*Depth,
+ Shared = lists:foldl(fun(_, A) -> [A|A] end,
+ [], lists:seq(1, Depth)),
+ Size = erts_debug:size(Shared),
+ Key = {?MODULE,?FUNCTION_NAME},
+ ok = persistent_term:put(Key, Shared),
+ SharedStored = persistent_term:get(Key),
+ Size = erts_debug:size(SharedStored),
+ 0 = erts_debug:size_shared(SharedStored),
+
+ {Pid,Ref} = spawn_monitor(fun() ->
+ Term = persistent_term:get(Key),
+ Size = erts_debug:size(Term),
+ 0 = erts_debug:size_shared(Term),
+ true = Term =:= SharedStored
+ end),
+ receive
+ {'DOWN',Ref,process,Pid,normal} ->
+ true = persistent_term:erase(Key),
+ Size = erts_debug:size(SharedStored),
+ chk(Chk)
+ end.
+
+%% Test trapping of persistent_term:get/0.
+
+get_trapping(_Config) ->
+ Chk = chk(),
+
+ %% Assume that the get/0 traps after 4000 iterations
+ %% in a non-debug emulator.
+ N = case test_server:timetrap_scale_factor() of
+ 1 -> 10000;
+ _ -> 1000
+ end,
+ spawn_link(fun() -> get_trapping_create(N) end),
+ All = do_get_trapping(N, []),
+ N = get_trapping_check_result(lists:sort(All), 1),
+ erlang:garbage_collect(),
+ get_trapping_erase(N),
+ chk(Chk).
+
+do_get_trapping(N, Prev) ->
+ case persistent_term:get() of
+ Prev when length(Prev) >= N ->
+ All = [P || {{?MODULE,{get_trapping,_}},_}=P <- Prev],
+ case length(All) of
+ N -> All;
+ _ -> do_get_trapping(N, Prev)
+ end;
+ New ->
+ receive after 1 -> ok end,
+ do_get_trapping(N, New)
+ end.
+
+get_trapping_create(0) ->
+ ok;
+get_trapping_create(N) ->
+ ok = persistent_term:put({?MODULE,{get_trapping,N}}, N),
+ get_trapping_create(N-1).
+
+get_trapping_check_result([{{?MODULE,{get_trapping,N}},N}|T], N) ->
+ get_trapping_check_result(T, N+1);
+get_trapping_check_result([], N) -> N-1.
+
+get_trapping_erase(0) ->
+ ok;
+get_trapping_erase(N) ->
+ true = persistent_term:erase({?MODULE,{get_trapping,N}}),
+ get_trapping_erase(N-1).
+
+%% Test retrieving information about persistent terms.
+
+info(_Config) ->
+ Chk = chk(),
+
+ %% White box test of info/0.
+ N = 100,
+ try
+ Overhead = info_literal_area_overhead(),
+ io:format("Overhead = ~p\n", [Overhead]),
+ info_wb(N, Overhead, info_info())
+ after
+ _ = [_ = persistent_term:erase({?MODULE,I}) ||
+ I <- lists:seq(1, N)]
+ end,
+
+ chk(Chk).
+
+%% White box test of persistent_term:info/0. We take into account
+%% that there might already exist persistent terms (created by the
+%% OTP standard libraries), but we assume that they are not
+%% changed during the execution of this test case.
+
+info_wb(0, _, _) ->
+ ok;
+info_wb(N, Overhead, {BaseCount,BaseMemory}) ->
+ Key = {?MODULE,N},
+ Value = lists:seq(1, N),
+ ok = persistent_term:put(Key, Value),
+
+ %% Calculate the extra memory needed for this term.
+ WordSize = erlang:system_info(wordsize),
+ ExtraMemory = Overhead + 2 * N * WordSize,
+
+ %% Call persistent_term:info/0.
+ {Count,Memory} = info_info(),
+
+ %% There should be one more persistent term.
+ Count = BaseCount + 1,
+
+ %% Verify that the amount of memory is correct.
+ case BaseMemory + ExtraMemory of
+ Memory ->
+ %% Exactly right. The size of the hash table was not changed.
+ ok;
+ Expected ->
+ %% The size of the hash table has been doubled to avoid filling
+ %% the table to more than 50 percent. The previous number
+ %% of entries must have been exactly half the size of the
+ %% hash table. The expected number of extra words added by
+ %% the resizing will be twice that number.
+ ExtraWords = BaseCount * 2,
+ true = ExtraWords * WordSize =:= (Memory - Expected)
+ end,
+ info_wb(N-1, Overhead, {Count,Memory}).
+
+info_info() ->
+ #{count:=Count,memory:=Memory} = persistent_term:info(),
+ true = is_integer(Count) andalso Count >= 0,
+ true = is_integer(Memory) andalso Memory >= 0,
+ {Count,Memory}.
+
+%% Calculate the number of extra bytes needed for storing each term in
+%% the literal area, assuming that the key is a tuple of size 2 with
+%% immediate elements. The calculated number is the size of the
+%% ErtsLiteralArea struct excluding the storage for the literal term
+%% itself.
+
+info_literal_area_overhead() ->
+ Key1 = {?MODULE,1},
+ Key2 = {?MODULE,2},
+ #{memory:=Mem0} = persistent_term:info(),
+ ok = persistent_term:put(Key1, literal),
+ #{memory:=Mem1} = persistent_term:info(),
+ ok = persistent_term:put(Key2, literal),
+ #{memory:=Mem2} = persistent_term:info(),
+ true = persistent_term:erase(Key1),
+ true = persistent_term:erase(Key2),
+
+ %% The size of the hash table may have doubled when inserting
+ %% one of the keys. To avoid counting the change in the hash
+ %% table size, take the smaller size increase.
+ min(Mem2-Mem1, Mem1-Mem0).
+
+%% Test trapping of persistent_term:info/0.
+
+info_trapping(_Config) ->
+ Chk = chk(),
+
+ %% Assume that the info/0 traps after 4000 iterations
+ %% in a non-debug emulator.
+ N = case test_server:timetrap_scale_factor() of
+ 1 -> 10000;
+ _ -> 1000
+ end,
+ spawn_link(fun() -> info_trapping_create(N) end),
+ All = do_info_trapping(N, 0),
+ N = info_trapping_check_result(lists:sort(All), 1),
+ erlang:garbage_collect(),
+ info_trapping_erase(N),
+ chk(Chk).
+
+do_info_trapping(N, PrevMem) ->
+ case info_info() of
+ {N,Mem} ->
+ true = Mem >= PrevMem,
+ All = [P || {{?MODULE,{info_trapping,_}},_}=P <- persistent_term:get()],
+ case length(All) of
+ N -> All;
+ _ -> do_info_trapping(N, PrevMem)
+ end;
+ {_,Mem} ->
+ true = Mem >= PrevMem,
+ receive after 1 -> ok end,
+ do_info_trapping(N, Mem)
+ end.
+
+info_trapping_create(0) ->
+ ok;
+info_trapping_create(N) ->
+ ok = persistent_term:put({?MODULE,{info_trapping,N}}, N),
+ info_trapping_create(N-1).
+
+info_trapping_check_result([{{?MODULE,{info_trapping,N}},N}|T], N) ->
+ info_trapping_check_result(T, N+1);
+info_trapping_check_result([], N) -> N-1.
+
+info_trapping_erase(0) ->
+ ok;
+info_trapping_erase(N) ->
+ true = persistent_term:erase({?MODULE,{info_trapping,N}}),
+ info_trapping_erase(N-1).
+
+%% Test that hash tables are deallocated if a process running
+%% persistent_term:get/0 is killed.
+
+killed_while_trapping(_Config) ->
+ Chk = chk(),
+ N = case test_server:timetrap_scale_factor() of
+ 1 -> 20000;
+ _ -> 2000
+ end,
+ kwt_put(N),
+ kwt_spawn(10),
+ kwt_erase(N),
+ chk(Chk).
+
+kwt_put(0) ->
+ ok;
+kwt_put(N) ->
+ ok = persistent_term:put({?MODULE,{kwt,N}}, N),
+ kwt_put(N-1).
+
+kwt_spawn(0) ->
+ ok;
+kwt_spawn(N) ->
+ Pids = [spawn(fun kwt_getter/0) || _ <- lists:seq(1, 20)],
+ erlang:yield(),
+ _ = [exit(Pid, kill) || Pid <- Pids],
+ kwt_spawn(N-1).
+
+kwt_getter() ->
+ _ = persistent_term:get(),
+ kwt_getter().
+
+kwt_erase(0) ->
+ ok;
+kwt_erase(N) ->
+ true = persistent_term:erase({?MODULE,{kwt,N}}),
+ kwt_erase(N-1).
+
+%% Test storing off heap values (such as ref-counted binaries).
+
+off_heap_values(_Config) ->
+ Chk = chk(),
+ Key = {?MODULE,?FUNCTION_NAME},
+ Val = {a,list_to_binary(lists:seq(0, 255)),make_ref(),fun() -> ok end},
+ ok = persistent_term:put(Key, Val),
+ FetchedVal = persistent_term:get(Key),
+ Val = FetchedVal,
+ true = persistent_term:erase(Key),
+ off_heap_values_wait(FetchedVal, Val),
+ chk(Chk).
+
+off_heap_values_wait(FetchedVal, Val) ->
+ case erts_debug:size_shared(FetchedVal) of
+ 0 ->
+ Val = FetchedVal,
+ ok;
+ _ ->
+ erlang:yield(),
+ off_heap_values_wait(FetchedVal, Val)
+ end.
+
+%% Test some more data types as keys. Use the module name as a key
+%% to minimize the risk of collision with any key used
+%% by the OTP libraries.
+
+keys(_Config) ->
+ Chk = chk(),
+ do_key(?MODULE),
+ do_key([?MODULE]),
+ do_key(?MODULE_STRING),
+ do_key(list_to_binary(?MODULE_STRING)),
+ chk(Chk).
+
+do_key(Key) ->
+ Val = term_to_binary(Key),
+ ok = persistent_term:put(Key, Val),
+ StoredVal = persistent_term:get(Key),
+ Val = StoredVal,
+ true = persistent_term:erase(Key).
+
+%% Create persistent terms with keys that are known to collide.
+%% Delete them in random order, making sure that all other
+%% terms can still be found.
+
+collisions(_Config) ->
+ Chk = chk(),
+
+ %% Create persistent terms with colliding keys and random values.
+ Keys = lists:flatten(colliding_keys()),
+ Kvs = [{K,rand:uniform(1000)} || K <- Keys],
+ _ = [ok = persistent_term:put(K, V) || {K,V} <- Kvs],
+ _ = [V = persistent_term:get(K) || {K,V} <- Kvs],
+
+ %% Now delete the persistent terms in random order.
+ collisions_delete(lists:keysort(2, Kvs)),
+
+ chk(Chk).
+
+collisions_delete([{Key,Val}|Kvs]) ->
+ Val = persistent_term:get(Key),
+ true = persistent_term:erase(Key),
+ true = lists:sort(persistent_term:get()) =:= lists:sort(Kvs),
+ _ = [V = persistent_term:get(K) || {K,V} <- Kvs],
+ collisions_delete(Kvs);
+collisions_delete([]) ->
+ ok.
+
+colliding_keys() ->
+ %% Collisions found by Jesper L. Andersen for breaking maps.
+ L = [[764492191,2361333849],
+ [49527266765044,90940896816021,20062927283041,267080852079651],
+ [249858369443708,206247021789428,20287304470696,25847120931175],
+ [10645228898670,224705626119556,267405565521452,258214397180678],
+ [264783762221048,166955943492306,98802957003141,102012488332476],
+ [69425677456944,177142907243411,137138950917722,228865047699598],
+ [116031213307147,29203342183358,37406949328742,255198080174323],
+ [200358182338308,235207156008390,120922906095920,116215987197289],
+ [58728890318426,68877471005069,176496507286088,221041411345780],
+ [91094120814795,50665258299931,256093108116737,19777509566621],
+ [74646746200247,98350487270564,154448261001199,39881047281135],
+ [23408943649483,164410325820923,248161749770122,274558342231648],
+ [169531547115055,213630535746863,235098262267796,200508473898303],
+ [235098564415817,85039146398174,51721575960328,173069189684390],
+ [176136386396069,155368359051606,147817099696487,265419485459634],
+ [137542881551462,40028925519736,70525669519846,63445773516557],
+ [173854695142814,114282444507812,149945832627054,99605565798831],
+ [177686773562184,127158716984798,132495543008547],
+ [227073396444896,139667311071766,158915951283562],
+ [26212438434289,94902985796531,198145776057315],
+ [266279278943923,58550737262493,74297973216378],
+ [32373606512065,131854353044428,184642643042326],
+ [34335377662439,85341895822066,273492717750246]],
+
+ %% Verify that the keys still collide (this will fail if the
+ %% internal hash function has been changed).
+ erts_debug:set_internal_state(available_internal_state, true),
+ try
+ case erlang:system_info(wordsize) of
+ 8 ->
+ verify_colliding_keys(L);
+ 4 ->
+ %% Not guaranteed to collide on a 32-bit system.
+ ok
+ end
+ after
+ erts_debug:set_internal_state(available_internal_state, false)
+ end,
+
+ L.
+
+verify_colliding_keys([[K|Ks]|Gs]) ->
+ Hash = internal_hash(K),
+ [Hash] = lists:usort([internal_hash(Key) || Key <- Ks]),
+ verify_colliding_keys(Gs);
+verify_colliding_keys([]) ->
+ ok.
+
+internal_hash(Term) ->
+ erts_debug:get_internal_state({internal_hash,Term}).
+
+%% Test that all persistent terms are erased by init:restart/0.
+
+init_restart(_Config) ->
+ File = "command_file",
+ ok = file:write_file(File, term_to_binary(restart)),
+ {ok,[[Erl]]} = init:get_argument(progname),
+ ModPath = filename:dirname(code:which(?MODULE)),
+ Cmd = Erl ++ " -pa " ++ ModPath ++ " -noshell "
+ "-run " ++ ?MODULE_STRING ++ " test_init_restart_cmd " ++
+ File,
+ io:format("~s\n", [Cmd]),
+ Expected = "12ok",
+ case os:cmd(Cmd) of
+ Expected ->
+ ok;
+ Actual ->
+ io:format("Expected: ~s", [Expected]),
+ io:format("Actual: ~s\n", [Actual]),
+ ct:fail(unexpected_output)
+ end.
+
+test_init_restart_cmd([File]) ->
+ try
+ do_test_init_restart_cmd(File)
+ catch
+ C:R ->
+ io:format("\n~p ~p\n", [C,R]),
+ halt()
+ end,
+ receive
+ _ -> ok
+ end.
+
+do_test_init_restart_cmd(File) ->
+ {ok,Bin} = file:read_file(File),
+ Seq = lists:seq(1, 50),
+ case binary_to_term(Bin) of
+ restart ->
+ _ = [persistent_term:put({?MODULE,I}, {value,I}) ||
+ I <- Seq],
+ ok = file:write_file(File, term_to_binary(was_restarted)),
+ io:put_chars("1"),
+ init:restart(),
+ receive
+ _ -> ok
+ end;
+ was_restarted ->
+ io:put_chars("2"),
+ ok = file:delete(File),
+ _ = [begin
+ Key = {?MODULE,I},
+ {'EXIT',{badarg,_}} = (catch persistent_term:get(Key))
+ end || I <- Seq],
+ io:put_chars("ok"),
+ init:stop()
+ end.
+
+%% Check that there is the same number of persistent terms before
+%% and after each test case.
+
+chk() ->
+ persistent_term:info().
+
+chk(Chk) ->
+ Chk = persistent_term:info(),
+ Key = {?MODULE,?FUNCTION_NAME},
+ ok = persistent_term:put(Key, {term,Chk}),
+ Term = persistent_term:get(Key),
+ true = persistent_term:erase(Key),
+ chk_not_stuck(Term),
+ ok.
+
+chk_not_stuck(Term) ->
+ %% Hash tables to be deleted are put onto a queue.
+ %% Make sure that the queue isn't stuck by a table with
+ %% a non-zero ref count.
+
+ case erts_debug:size_shared(Term) of
+ 0 ->
+ erlang:yield(),
+ chk_not_stuck(Term);
+ _ ->
+ ok
+ end.
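
The purging cases above reduce to one observable property: a process that fetched a persistent term before it was erased (or replaced) still sees its value afterwards, because the term is copied onto the process heap during the purge. A rough sketch of that property, with a hypothetical key, not part of the patch:

    Key = {my_mod, example},                     %% hypothetical key, illustration only
    ok = persistent_term:put(Key, {term, <<"abc">>}),
    Val = persistent_term:get(Key),              %% still a pointer into the literal area
    true = persistent_term:erase(Key),
    {'EXIT',{badarg,_}} = (catch persistent_term:get(Key)),
    {term, <<"abc">>} = Val.                     %% the old value survives in this process
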
diff --git a/erts/etc/unix/run_erl.c b/erts/etc/unix/run_erl.c
index 725343d701..bfb3e1bd2c 100644
--- a/erts/etc/unix/run_erl.c
+++ b/erts/etc/unix/run_erl.c
@@ -43,10 +43,10 @@
#endif
#ifdef HAVE_WORKING_POSIX_OPENPT
# ifndef _XOPEN_SOURCE
- /* On OS X and BSD, we must leave _XOPEN_SOURCE undefined in order for
- * the prototype of vsyslog() to be included.
+ /* On OS X, BSD and Solaris, we must leave _XOPEN_SOURCE undefined in order
+ * for the prototype of vsyslog() to be included.
*/
-# if !(defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__))
+# if !(defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__) || defined(__sun))
# define _XOPEN_SOURCE 600
# endif
# endif
diff --git a/erts/preloaded/ebin/atomics.beam b/erts/preloaded/ebin/atomics.beam
new file mode 100644
index 0000000000..1de97fa668
--- /dev/null
+++ b/erts/preloaded/ebin/atomics.beam
Binary files differ
diff --git a/erts/preloaded/ebin/counters.beam b/erts/preloaded/ebin/counters.beam
new file mode 100644
index 0000000000..caaa6167e1
--- /dev/null
+++ b/erts/preloaded/ebin/counters.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam
index 15c59de80a..e174f71966 100644
--- a/erts/preloaded/ebin/erts_internal.beam
+++ b/erts/preloaded/ebin/erts_internal.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 858a9dc63e..1e60ef7e88 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/persistent_term.beam b/erts/preloaded/ebin/persistent_term.beam
new file mode 100644
index 0000000000..79ef03b9a6
--- /dev/null
+++ b/erts/preloaded/ebin/persistent_term.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index 52bab031ff..eaa1e2cdf8 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/src/Makefile b/erts/preloaded/src/Makefile
index 4333f6643a..e1bd5bc295 100644
--- a/erts/preloaded/src/Makefile
+++ b/erts/preloaded/src/Makefile
@@ -47,7 +47,10 @@ PRE_LOADED_ERL_MODULES = \
erts_internal \
erl_tracer \
erts_literal_area_collector \
- erts_dirty_process_signal_handler
+ erts_dirty_process_signal_handler \
+ atomics \
+ counters \
+ persistent_term
PRE_LOADED_BEAM_MODULES = \
prim_eval
diff --git a/erts/preloaded/src/atomics.erl b/erts/preloaded/src/atomics.erl
new file mode 100644
index 0000000000..d1fe5e65cf
--- /dev/null
+++ b/erts/preloaded/src/atomics.erl
@@ -0,0 +1,119 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% Purpose : Main atomics API module.
+
+-module(atomics).
+
+-export([new/2,
+ put/3, get/2,
+ add/3, add_get/3,
+ sub/3, sub_get/3,
+ exchange/3, compare_exchange/4,
+ info/1]).
+
+-export_type([atomics_ref/0]).
+
+-opaque atomics_ref() :: reference().
+
+-define(OPT_SIGNED, (1 bsl 0)).
+-define(OPT_DEFAULT, ?OPT_SIGNED).
+
+-spec new(Arity, Opts) -> atomics_ref() when
+ Arity :: pos_integer(),
+ Opts :: [Opt],
+ Opt :: {signed, boolean()}.
+new(Arity, Opts) ->
+ erts_internal:atomics_new(Arity, encode_opts(Opts, ?OPT_DEFAULT)).
+
+encode_opts([{signed, true}|T], Acc) ->
+ encode_opts(T, Acc bor ?OPT_SIGNED);
+encode_opts([{signed, false}|T], Acc) ->
+ encode_opts(T, Acc band (bnot ?OPT_SIGNED));
+encode_opts([], Acc) ->
+ Acc;
+encode_opts(_, _) ->
+ erlang:error(badarg).
+
+-spec put(Ref, Ix, Value) -> ok when
+ Ref :: atomics_ref(),
+ Ix :: integer(),
+ Value :: integer().
+put(_Ref, _Ix, _Value) ->
+ erlang:nif_error(undef).
+
+-spec get(Ref, Ix) -> integer() when
+ Ref :: atomics_ref(),
+ Ix :: integer().
+get(_Ref, _Ix) ->
+ erlang:nif_error(undef).
+
+-spec add(Ref, Ix, Incr) -> ok when
+ Ref :: atomics_ref(),
+ Ix :: integer(),
+ Incr :: integer().
+add(_Ref, _Ix, _Incr) ->
+ erlang:nif_error(undef).
+
+-spec add_get(Ref, Ix, Incr) -> integer() when
+ Ref :: atomics_ref(),
+ Ix :: integer(),
+ Incr :: integer().
+add_get(_Ref, _Ix, _Incr) ->
+ erlang:nif_error(undef).
+
+-spec sub(Ref, Ix, Decr) -> ok when
+ Ref :: atomics_ref(),
+ Ix :: integer(),
+ Decr :: integer().
+sub(Ref, Ix, Decr) ->
+ ?MODULE:add(Ref, Ix, -Decr).
+
+-spec sub_get(Ref, Ix, Decr) -> integer() when
+ Ref :: atomics_ref(),
+ Ix :: integer(),
+ Decr :: integer().
+sub_get(Ref, Ix, Decr) ->
+ ?MODULE:add_get(Ref, Ix, -Decr).
+
+-spec exchange(Ref, Ix, Desired) -> integer() when
+ Ref :: atomics_ref(),
+ Ix :: integer(),
+ Desired :: integer().
+exchange(_Ref, _Ix, _Desired) ->
+ erlang:nif_error(undef).
+
+-spec compare_exchange(Ref, Ix, Expected, Desired) -> ok | integer() when
+ Ref :: atomics_ref(),
+ Ix :: integer(),
+ Expected :: integer(),
+ Desired :: integer().
+compare_exchange(_Ref, _Ix, _Expected, _Desired) ->
+ erlang:nif_error(undef).
+
+-spec info(Ref) -> Info when
+ Ref :: atomics_ref(),
+ Info :: #{'size':=Size,'max':=Max,'min':=Min,'memory':=Memory},
+ Size :: non_neg_integer(),
+ Max :: integer(),
+ Min :: integer(),
+ Memory :: non_neg_integer().
+info(_Ref) ->
+ erlang:nif_error(undef).
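
The specs above make up the whole user-facing atomics API. A minimal usage sketch (illustrative only, not part of the patch):

    A = atomics:new(3, [{signed, false}]),     %% three unsigned 64-bit slots, initially 0
    ok = atomics:put(A, 1, 17),
    ok = atomics:add(A, 1, 5),
    22 = atomics:get(A, 1),
    22 = atomics:exchange(A, 1, 0),            %% returns the previous value
    ok = atomics:compare_exchange(A, 1, 0, 7), %% ok on success, otherwise the actual value
    #{size := 3, memory := _} = atomics:info(A).
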
diff --git a/erts/preloaded/src/counters.erl b/erts/preloaded/src/counters.erl
new file mode 100644
index 0000000000..67354f648d
--- /dev/null
+++ b/erts/preloaded/src/counters.erl
@@ -0,0 +1,90 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% Purpose : Main counters API module.
+
+-module(counters).
+
+-export([new/2,
+ get/2,
+ add/3,
+ sub/3,
+ info/1]).
+
+-export_type([counters_ref/0]).
+
+-opaque counters_ref() :: {atomics, reference()} | {write_concurrency, reference()}.
+
+-spec new(Size, Opts) -> counters_ref() when
+ Size :: pos_integer(),
+ Opts :: [Opt],
+ Opt :: atomics | write_concurrency.
+new(Size, [atomics]) ->
+ {atomics, atomics:new(Size, [{signed, true}])};
+new(Size, [write_concurrency]) ->
+ {write_concurrency, erts_internal:counters_new(Size)};
+new(Size, []) ->
+ new(Size, [atomics]);
+new(_, _) ->
+ erlang:error(badarg).
+
+-spec get(Ref, Ix) -> integer() when
+ Ref :: counters_ref(),
+ Ix :: integer().
+get({atomics,Ref}, Ix) ->
+ atomics:get(Ref, Ix);
+get({write_concurrency, Ref}, Ix) ->
+ erts_internal:counters_get(Ref, Ix);
+get(_, _) ->
+ erlang:error(badarg).
+
+
+
+-spec add(Ref, Ix, Incr) -> ok when
+ Ref :: counters_ref(),
+ Ix :: integer(),
+ Incr :: integer().
+add({atomics, Ref}, Ix, Incr) ->
+ atomics:add(Ref, Ix, Incr);
+add({write_concurrency, Ref}, Ix, Incr) ->
+ erts_internal:counters_add(Ref, Ix, Incr);
+add(_, _, _) ->
+ erlang:error(badarg).
+
+
+-spec sub(Ref, Ix, Decr) -> ok when
+ Ref :: counters_ref(),
+ Ix :: integer(),
+ Decr :: integer().
+sub(Ref, Ix, Decr) ->
+ add(Ref, Ix, -Decr).
+
+-spec info(Ref) -> Info when
+ Ref :: counters_ref(),
+ Info :: #{'size':=Size, 'memory':=Memory},
+ Size :: non_neg_integer(),
+ Memory :: non_neg_integer().
+info({atomics, Ref}) ->
+ atomics:info(Ref);
+info({write_concurrency, Ref}) ->
+ erts_internal:counters_info(Ref);
+info(_) ->
+ erlang:error(badarg).
+
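
Similarly, a short usage sketch for the counters front end (illustrative only):

    C = counters:new(2, [write_concurrency]),  %% or [atomics] / [] for the default backend
    ok = counters:add(C, 1, 1),
    ok = counters:sub(C, 2, 3),
    1 = counters:get(C, 1),
    -3 = counters:get(C, 2),
    #{size := 2, memory := _} = counters:info(C).
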
diff --git a/erts/preloaded/src/erts.app.src b/erts/preloaded/src/erts.app.src
index 8c34c99a98..ed645d1191 100644
--- a/erts/preloaded/src/erts.app.src
+++ b/erts/preloaded/src/erts.app.src
@@ -33,12 +33,14 @@
prim_file,
prim_inet,
prim_zip,
+ atomics,
+ counters,
zlib
]},
{registered, []},
{applications, []},
{env, []},
- {runtime_dependencies, ["stdlib-3.5", "kernel-6.1", "sasl-3.0.1"]}
+ {runtime_dependencies, ["stdlib-3.5", "kernel-6.1", "sasl-@OTP-13468@"]}
]}.
%% vim: ft=erlang
diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl
index 88f47e917b..d491a505c6 100644
--- a/erts/preloaded/src/erts_internal.erl
+++ b/erts/preloaded/src/erts_internal.erl
@@ -90,6 +90,13 @@
-export([create_dist_channel/4]).
+-export([erase_persistent_terms/0]).
+
+-export([atomics_new/2]).
+
+-export([counters_new/1, counters_get/2, counters_add/3,
+ counters_info/1]).
+
%%
%% Await result of send to port
%%
@@ -691,3 +698,27 @@ process_flag(_Pid, _Flag, _Value) ->
create_dist_channel(_Node, _DistCtrlr, _Flags, _Ver) ->
erlang:nif_error(undefined).
+
+-spec erase_persistent_terms() -> 'ok'.
+erase_persistent_terms() ->
+ erlang:nif_error(undefined).
+
+-spec atomics_new(pos_integer(), pos_integer()) -> reference().
+atomics_new(_Arity, _EncOpts) ->
+ erlang:nif_error(undef).
+
+-spec counters_new(pos_integer()) -> reference().
+counters_new(_Size) ->
+ erlang:nif_error(undef).
+
+-spec counters_get(reference(), pos_integer()) -> integer().
+counters_get(_Ref, _Ix) ->
+ erlang:nif_error(undef).
+
+-spec counters_add(reference(), pos_integer(), integer()) -> ok.
+counters_add(_Ref, _Ix, _Incr) ->
+ erlang:nif_error(undef).
+
+-spec counters_info(reference()) -> #{}.
+counters_info(_Ref) ->
+ erlang:nif_error(undef).
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index 253fcf7a1f..b4b8b3bf9b 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -552,6 +552,7 @@ stop(Reason,State) ->
do_stop(restart,#state{start = Start, flags = Flags, args = Args}) ->
%% Make sure we don't have any outstanding messages before doing the restart.
flush(),
+ erts_internal:erase_persistent_terms(),
boot(Start,Flags,Args);
do_stop(reboot,_) ->
halt();
diff --git a/erts/preloaded/src/persistent_term.erl b/erts/preloaded/src/persistent_term.erl
new file mode 100644
index 0000000000..5d0c266127
--- /dev/null
+++ b/erts/preloaded/src/persistent_term.erl
@@ -0,0 +1,55 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(persistent_term).
+
+-export([erase/1,get/0,get/1,info/0,put/2]).
+
+-type key() :: term().
+-type value() :: term().
+
+-spec erase(Key) -> Result when
+ Key :: key(),
+ Result :: boolean().
+erase(_Key) ->
+ erlang:nif_error(undef).
+
+-spec get() -> List when
+ List :: [{key(),value()}].
+get() ->
+ erlang:nif_error(undef).
+
+-spec get(Key) -> Value when
+ Key :: key(),
+ Value :: value().
+get(_Key) ->
+ erlang:nif_error(undef).
+
+-spec info() -> Info when
+ Info :: #{'count':=Count,'memory':=Memory},
+ Count :: non_neg_integer(),
+ Memory :: non_neg_integer().
+info() ->
+ erlang:nif_error(undef).
+
+-spec put(Key, Value) -> 'ok' when
+ Key :: key(),
+ Value :: value().
+put(_Key, _Value) ->
+ erlang:nif_error(undef).
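
And a minimal sketch of the persistent_term API declared above (the key is hypothetical; as in the test suite, real keys should embed the using module's name to avoid collisions):

    ok = persistent_term:put({my_app, config}, #{level => info}),
    #{level := info} = persistent_term:get({my_app, config}),
    #{count := _, memory := _} = persistent_term:info(),
    true = persistent_term:erase({my_app, config}).
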
diff --git a/erts/preloaded/src/prim_inet.erl b/erts/preloaded/src/prim_inet.erl
index 963e8933bc..f1d938c9a4 100644
--- a/erts/preloaded/src/prim_inet.erl
+++ b/erts/preloaded/src/prim_inet.erl
@@ -49,9 +49,15 @@
-include("inet_sctp.hrl").
-include("inet_int.hrl").
-%-define(DEBUG, 1).
+%%%-define(DEBUG, 1).
-ifdef(DEBUG).
--define(DBG_FORMAT(Format, Args), (io:format((Format), (Args)))).
+-define(
+ DBG_FORMAT(Format, Args),
+ begin
+ %% io:format((Format), (Args)),
+ erlang:display(lists:flatten(io_lib:format((Format), (Args)))),
+ ok
+ end).
-else.
-define(DBG_FORMAT(Format, Args), ok).
-endif.
@@ -150,39 +156,96 @@ shutdown_1(S, How) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
close(S) when is_port(S) ->
+ ?DBG_FORMAT("prim_inet:close(~p)~n", [S]),
case getopt(S, linger) of
{ok,{true,0}} ->
close_port(S);
- _ ->
- case subscribe(S, [subs_empty_out_q]) of
- {ok, [{subs_empty_out_q,N}]} when N > 0 ->
- close_pend_loop(S, N); %% wait for pending output to be sent
- _ ->
- close_port(S)
- end
+ {ok,{true,T}} ->
+ %% Wait for T seconds for pending output to be sent
+ %%
+ %% Note that this handling of Linger may look ok,
+ %% but sweeps some problems under the rug since
+ %% there are OS buffers that may have remaining data
+ %% after the inet driver has emptied its buffers.
+ %% But Linger for nonblocking sockets is broken
+ %% anyway on all OSes, according to hearsay,
+ %% and is a contradiction in itself.
+ %% We have hereby done our best...
+ %%
+ Tref = erlang:start_timer(T * 1000, self(), close_port),
+ close_pend_loop(S, Tref, undefined);
+ _ -> % Regard this as {ok,{false,_}}
+ case subscribe(S, [subs_empty_out_q]) of
+ {ok, [{subs_empty_out_q,N}]} when N > 0 ->
+ %% Wait for pending output to be sent
+ DefaultT = 180000, % Arbitrary system timeout 3 min
+ Tref = erlang:start_timer(DefaultT, self(), close_port),
+ close_pend_loop(S, Tref, N);
+ _ ->
+ %% Subscribe failed or empty out q - give up or done
+ close_port(S)
+ end
end.
-close_pend_loop(S, N) ->
+close_pend_loop(S, Tref, N) ->
+ ?DBG_FORMAT("prim_inet:close_pend_loop(~p, _, ~p)~n", [S,N]),
receive
- {empty_out_q,S} ->
- close_port(S)
+ {timeout,Tref,_} -> % Linger timeout
+ ?DBG_FORMAT("prim_inet:close_pend_loop(~p, _, _) timeout~n", [S]),
+ close_port(S);
+ {empty_out_q,S} when N =/= undefined ->
+ ?DBG_FORMAT(
+ "prim_inet:close_pend_loop(~p, _, _) empty_out_q~n", [S]),
+ close_port(S, Tref)
after ?INET_CLOSE_TIMEOUT ->
case getstat(S, [send_pend]) of
{ok, [{send_pend,N1}]} ->
+ ?DBG_FORMAT(
+ "prim_inet:close_pend_loop(~p, _, _) send_pend ~p~n",
+ [S,N1]),
if
- N1 =:= N ->
- close_port(S);
- true ->
- close_pend_loop(S, N1)
+ N1 =:= 0 ->
+ %% Empty outq - done
+ close_port(S, Tref);
+ N =:= undefined ->
+ %% Within linger time - wait some more
+ close_pend_loop(S, Tref, N);
+ N1 =:= N ->
+ %% Inactivity - give up
+ close_port(S, Tref);
+ true ->
+ %% Still moving - wait some more
+ close_pend_loop(S, Tref, N)
end;
- _ ->
- close_port(S)
- end
+ _Stat ->
+ %% Failed getstat - give up
+ ?DBG_FORMAT(
+ "prim_inet:close_pend_loop(~p, _, _) getstat ~p~n",
+ [S,_Stat]),
+ close_port(S, Tref)
+ end
end.
+
+close_port(S, Tref) ->
+ ?DBG_FORMAT("prim_inet:close_port(~p, _)~n", [S]),
+ case erlang:cancel_timer(Tref) of
+ false ->
+ receive
+ {timeout,Tref,_} ->
+ ok
+ end;
+ _N ->
+ ok
+ end,
+ close_port(S).
+%%
close_port(S) ->
- catch erlang:port_close(S),
- receive {'EXIT',S,_} -> ok after 0 -> ok end.
+ ?DBG_FORMAT("prim_inet:close_port(~p)~n", [S]),
+ _Closed = (catch erlang:port_close(S)),
+ receive {'EXIT',S,_} -> ok after 0 -> ok end,
+ ?DBG_FORMAT("prim_inet:close_port(~p) ~p~n", [S,_Closed]),
+ ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
@@ -424,23 +487,49 @@ peeloff(S, AssocId) ->
%% be called directly -- use "sendmsg" instead:
%%
send(S, Data, OptList) when is_port(S), is_list(OptList) ->
- ?DBG_FORMAT("prim_inet:send(~p, ~p)~n", [S,Data]),
+ ?DBG_FORMAT("prim_inet:send(~p, _, ~p)~n", [S,OptList]),
try erlang:port_command(S, Data, OptList) of
false -> % Port busy and nosuspend option passed
?DBG_FORMAT("prim_inet:send() -> {error,busy}~n", []),
{error,busy};
true ->
- receive
- {inet_reply,S,Status} ->
- ?DBG_FORMAT("prim_inet:send() -> ~p~n", [Status]),
- Status
- end
+ send_recv_reply(S, undefined)
catch
error:_Error ->
?DBG_FORMAT("prim_inet:send() -> {error,einval}~n", []),
{error,einval}
end.
+send_recv_reply(S, Mref) ->
+ ReplyTimeout =
+ case Mref of
+ undefined ->
+ ?INET_CLOSE_TIMEOUT;
+ _ ->
+ infinity
+ end,
+ receive
+ {inet_reply,S,Status} ->
+ ?DBG_FORMAT(
+ "prim_inet:send_recv_reply(~p, _): inet_reply ~p~n",
+ [S,Status]),
+ case Mref of
+ undefined -> ok;
+ _ ->
+ demonitor(Mref, [flush]),
+ ok
+ end,
+ Status;
+ {'DOWN',Mref,_,_,_Reason} when Mref =/= undefined ->
+ ?DBG_FORMAT(
+ "prim_inet:send_recv_reply(~p, _) 'DOWN' ~p~n",
+ [S,_Reason]),
+ {error,closed}
+ after ReplyTimeout ->
+ send_recv_reply(S, monitor(port, S))
+ end.
+
+
send(S, Data) ->
send(S, Data, []).
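
From user code the new linger-aware close path above is reached through the ordinary socket option; a sketch, assuming a connected gen_tcp socket S:

    %% With {linger,{true,5}}, gen_tcp:close/1 (which ends up in prim_inet:close/1)
    %% waits up to 5 seconds for queued output to drain before closing the port.
    ok = inet:setopts(S, [{linger, {true, 5}}]),
    ok = gen_tcp:close(S).
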
diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl
index 9eee56d604..22974da398 100644
--- a/lib/compiler/src/beam_jump.erl
+++ b/lib/compiler/src/beam_jump.erl
@@ -128,7 +128,7 @@
%%% on the program state.
%%%
--import(lists, [reverse/1,reverse/2,foldl/3]).
+-import(lists, [dropwhile/2,reverse/1,reverse/2,foldl/3]).
-type instruction() :: beam_utils:instruction().
@@ -411,14 +411,19 @@ opt_useless_loads([{test,_,{f,L},_}=I|Is], L, St) ->
opt_useless_loads(Is, _L, St) ->
{Is,St}.
-opt_useless_block_loads([{set,[Dst],_,_}=I|Is], L, Index) ->
- BlockJump = [{block,Is},{jump,{f,L}}],
+opt_useless_block_loads([{set,[Dst],_,_}=I|Is0], L, Index) ->
+ BlockJump = [{block,Is0},{jump,{f,L}}],
case beam_utils:is_killed(Dst, BlockJump, Index) of
true ->
- %% The register is killed and not used, we can remove the load
+ %% The register is killed and not used, so we can remove the load.
+ %% Remove any `put` instructions in case we just
+ %% removed a `put_tuple` instruction.
+ Is = dropwhile(fun({set,_,_,put}) -> true;
+ (_) -> false
+ end, Is0),
opt_useless_block_loads(Is, L, Index);
false ->
- [I|opt_useless_block_loads(Is, L, Index)]
+ [I|opt_useless_block_loads(Is0, L, Index)]
end;
opt_useless_block_loads([I|Is], L, Index) ->
[I|opt_useless_block_loads(Is, L, Index)];
diff --git a/lib/compiler/test/beam_jump_SUITE.erl b/lib/compiler/test/beam_jump_SUITE.erl
index c61e4ab65c..faedc0c1f1 100644
--- a/lib/compiler/test/beam_jump_SUITE.erl
+++ b/lib/compiler/test/beam_jump_SUITE.erl
@@ -21,7 +21,8 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- undefined_label/1,ambiguous_catch_try_state/1]).
+ undefined_label/1,ambiguous_catch_try_state/1,
+ build_tuple/1]).
suite() ->
[{ct_hooks,[ts_install_cth]}].
@@ -32,7 +33,8 @@ all() ->
groups() ->
[{p,[parallel],
[undefined_label,
- ambiguous_catch_try_state
+ ambiguous_catch_try_state,
+ build_tuple
]}].
init_per_suite(Config) ->
@@ -72,3 +74,16 @@ river() -> song.
checks(Wanted) ->
%% Must be one line to cause the unsafe optimization.
{catch case river() of sheet -> begin +Wanted, if "da" -> Wanted end end end, catch case river() of sheet -> begin + Wanted, if "da" -> Wanted end end end}.
+
+-record(message2, {id, p1}).
+-record(message3, {id, p1, p2}).
+
+build_tuple(_Config) ->
+ {'EXIT',{{badrecord,message3},_}} = (catch do_build_tuple(#message2{})),
+ ok.
+
+do_build_tuple(Message) ->
+ if is_record(Message, message2) ->
+ Res = {res, rand:uniform(100)},
+ {Message#message3.id, Res}
+ end.
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index 2db73c4af0..c2ab88417e 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -2026,7 +2026,7 @@ check_otp_test_engine(LibDir) ->
case filelib:wildcard("otp_test_engine*", LibDir) of
[] ->
{error, notexist};
- [LibName] ->
+ [LibName|_] -> % In case of Valgrind there could be more than one
LibPath = filename:join(LibDir,LibName),
case filelib:is_file(LibPath) of
true ->
diff --git a/lib/crypto/test/engine_SUITE.erl b/lib/crypto/test/engine_SUITE.erl
index b083b30d70..0427923941 100644
--- a/lib/crypto/test/engine_SUITE.erl
+++ b/lib/crypto/test/engine_SUITE.erl
@@ -75,11 +75,18 @@ groups() ->
init_per_suite(Config) ->
- case crypto:info_lib() of
- [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}] ->
+ case {os:type(), crypto:info_lib()} of
+ {_, [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}]} ->
{skip, "Problem with engine on OpenSSL 1.0.1s-freebsd"};
- Res ->
- ct:log("crypto:info_lib() -> ~p\n", [Res]),
+
+ {{unix,darwin}, _} ->
+ {skip, "Engine unsupported on Darwin"};
+
+ {{win32,_}, _} ->
+ {skip, "Engine unsupported on Windows"};
+
+ {OS, Res} ->
+ ct:log("crypto:info_lib() -> ~p\nos:type() -> ~p", [Res,OS]),
try crypto:start() of
ok ->
Config;
diff --git a/lib/eldap/doc/src/notes.xml b/lib/eldap/doc/src/notes.xml
index b390e0c047..bf9358c4d1 100644
--- a/lib/eldap/doc/src/notes.xml
+++ b/lib/eldap/doc/src/notes.xml
@@ -77,6 +77,22 @@
</section>
+<section><title>Eldap 1.2.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A race condition at close could cause the eldap client to
+ exit with a badarg message as cause.</p>
+ <p>
+ Own Id: OTP-15342 Aux Id: ERIERL-242 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Eldap 1.2.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -92,6 +108,22 @@
</section>
+<section><title>Eldap 1.2.2.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A race condition at close could cause the eldap client to
+ exit with a badarg message as cause.</p>
+ <p>
+ Own Id: OTP-15342 Aux Id: ERIERL-242 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Eldap 1.2.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -368,4 +400,3 @@
<p>New application. </p>
</section>
</chapter>
-
diff --git a/lib/hipe/main/hipe.erl b/lib/hipe/main/hipe.erl
index ac2e6c1e3b..e2cb9c0f0b 100644
--- a/lib/hipe/main/hipe.erl
+++ b/lib/hipe/main/hipe.erl
@@ -542,7 +542,7 @@ file(File) ->
| {'error', term()}
when Mod :: mod().
file(File, Options) when is_atom(File) ->
- case beam_lib:info(File) of
+ case beam_lib:info(atom_to_list(File)) of
L when is_list(L) ->
{module, Mod} = lists:keyfind(module, 1, L),
case compile(Mod, File, Options) of
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index 464c65ba76..2bcf137299 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -748,6 +748,14 @@ start(_, []) ->
exists, its associated value will be changed
to the given value. If it does not exist, it will
be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is set to default values. To update
+ only specified data, and keep the existing configuration for
+ the rest, use <seealso marker="#update_handler_config-3">
+ <c>update_handler_config/3</c></seealso>.</p>
<p>See the definition of
the <seealso marker="#type-handler_config">
<c>handler_config()</c></seealso> type for more
@@ -933,6 +941,42 @@ logger:set_handler_config(HandlerId, maps:merge(Old, Config)).
</func>
<func>
+ <name name="update_handler_config" arity="3" clause_i="1"/>
+ <name name="update_handler_config" arity="3" clause_i="2"/>
+ <name name="update_handler_config" arity="3" clause_i="3"/>
+ <name name="update_handler_config" arity="3" clause_i="4"/>
+ <name name="update_handler_config" arity="3" clause_i="5"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <type variable="HandlerId"/>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <type variable="Formatter" name_i="4"/>
+ <type variable="Config" name_i="5"/>
+ <type variable="Return"/>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to the given value. If it does not exist, it will
+ be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is not changed. To reset unspecified
+ data to default values,
+ use <seealso marker="#set_handler_config-3">
+ <c>set_handler_config/3</c></seealso>.</p>
+ <p>See the definition of
+ the <seealso marker="#type-handler_config">
+ <c>handler_config()</c></seealso> type for more
+ information about the different parameters.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="update_primary_config" arity="1"/>
<fsummary>Update primary configuration data for Logger.</fsummary>
<desc>
@@ -1041,10 +1085,11 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
</func>
<func>
- <name>HModule:changing_config(Config1, Config2) -> {ok, Config3} | {error, Reason}</name>
+ <name>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig) -> {ok, Config} | {error, Reason}</name>
<fsummary>The configuration for this handler is about to change.</fsummary>
<type>
- <v>Config1 = Config2 = Config3 =
+ <v>SetOrUpdate = set | update</v>
+ <v>OldConfig = NewConfig = Config =
<seealso marker="#type-handler_config">handler_config()</seealso></v>
<v>Reason = term()</v>
</type>
@@ -1053,18 +1098,51 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
<p>The function is called on a temporary process when the
configuration for a handler is about to change. The purpose
is to verify and act on the new configuration.</p>
- <p><c>Config1</c> is the existing configuration
- and <c>Config2</c> is the new configuration.</p>
+ <p><c>OldConfig</c> is the existing configuration
+ and <c>NewConfig</c> is the new configuration.</p>
<p>The handler identity is associated with the <c>id</c> key
- in <c>Config1</c>.</p>
+ in <c>OldConfig</c>.</p>
+ <p><c>SetOrUpdate</c> has the value <c>set</c> if the
+ configuration change originates from a call to
+ <seealso marker="#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso>, and <c>update</c>
+ if it originates from <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso>. The handler can
+ use this parameter to decide how to update the value of
+ the <c>config</c> field, that is, the handler specific
+ configuration data. Typically, if <c>SetOrUpdate</c>
+ equals <c>set</c>, values that are not specified must be
+ given their default values. If <c>SetOrUpdate</c>
+ equals <c>update</c>, the values found in <c>OldConfig</c>
+ must be used instead.</p>
<p>If everything succeeds, the callback function must return a
- possibly adjusted configuration in <c>{ok,Config3}</c>.</p>
+ possibly adjusted configuration in <c>{ok,Config}</c>.</p>
<p>If the configuration is faulty, the callback function must
return <c>{error,Reason}</c>.</p>
</desc>
</func>
<func>
+ <name>HModule:filter_config(Config) -> FilteredConfig</name>
+ <fsummary>Remove internal data from configuration.</fsummary>
+ <type>
+ <v>Config = FilteredConfig =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when one of the Logger API functions
+ for fetching the handler configuration is called, for
+ example
+ <seealso marker="#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>.</p>
+ <p>It allows the handler to remove internal data fields from
+ its configuration data before it is returned to the
+ caller.</p>
+ </desc>
+ </func>
+
+ <func>
<name>HModule:log(LogEvent, Config) -> void()</name>
<fsummary>Log the given log event.</fsummary>
<type>
@@ -1136,7 +1214,7 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
<item><seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>logger:updata_handler_config/2</c></seealso></item>
+ <c>logger:update_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_formatter_config-2">
<c>logger:update_formatter_config/2</c></seealso></item>
</list>
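To make the set/update distinction described above concrete, here is a hedged usage sketch (hypothetical handler id default with the Kernel logger_std_h handler; sync_mode_qlen is just one example key under config, and none of this is part of the patch):

    %% Keeps every other key under 'config' as it is:
    ok = logger:update_handler_config(default, config, #{sync_mode_qlen => 1}),
    %% Resets unspecified keys under 'config' to their default values
    %% (the Kernel handlers keep write-once fields such as 'type'):
    ok = logger:set_handler_config(default, config, #{sync_mode_qlen => 1}).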
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 4a81cfa34a..1870d2ab79 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -384,8 +384,8 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<p>In addition to the mandatory callback function <c>log/2</c>, a
handler module can export the optional callback
- functions <c>adding_handler/1</c>, <c>changing_config/2</c>
- and <c>removing_handler/1</c>. See
+ functions <c>adding_handler/1</c>, <c>changing_config/3</c>,
+ <c>filter_config/1</c>, and <c>removing_handler/1</c>. See
section <seealso marker="logger#handler_callback_functions">Handler
Callback Functions</seealso> in the logger(3) manual page for
more information about these functions.</p>
@@ -555,7 +555,7 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<item><seealso marker="logger#set_handler_config-2">
<c>set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>update_handler_config/2</c></seealso></item>
+ <c>update_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#add_handler_filter-3">
<c>add_handler_filter/3</c></seealso></item>
<item><seealso marker="logger#remove_handler_filter-2">
@@ -704,9 +704,13 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<item>
<p>If <c>HandlerId</c> is <c>default</c>, then this entry
modifies the default handler, equivalent to calling</p>
- <pre><seealso marker="logger#set_handler_config-2">
- logger:set_handler_config(default, Module, HandlerConfig)
- </seealso></pre>
+ <pre><seealso marker="logger#remove_handler-1">
+ logger:remove_handler(default)
+ </seealso></pre>
+ <p>followed by</p>
+ <pre><seealso marker="logger#add_handler-3">
+ logger:add_handler(default, Module, HandlerConfig)
+ </seealso></pre>
<p>For all other values of <c>HandlerId</c>, this entry
adds a new handler, equivalent to calling</p>
<pre><seealso marker="logger:add_handler/3">
@@ -1024,7 +1028,8 @@ ok</pre>
<list>
<item><c>adding_handler(Config)</c></item>
<item><c>removing_handler(Config)</c></item>
- <item><c>changing_config(OldConfig, NewConfig)</c></item>
+ <item><c>changing_config(SetOrUpdate, OldConfig, NewConfig)</c></item>
+ <item><c>filter_config(Config)</c></item>
</list>
<p>When a handler is added, by for example a call
to <seealso marker="logger#add_handler-3">
@@ -1043,11 +1048,18 @@ ok</pre>
<p>When <seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso>
or <seealso marker="logger#update_handler_config/2">
- <c>logger:update_handler_config/2</c></seealso> is called,
+ <c>logger:update_handler_config/2,3</c></seealso> is called,
Logger
- calls <c>HModule:changing_config(OldConfig, NewConfig)</c>. If
+ calls <c>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig)</c>. If
this function returns <c>{ok,NewConfig1}</c>, Logger
writes <c>NewConfig1</c> to the configuration database.</p>
+ <p>When <seealso marker="logger#get_config-0">
+ <c>logger:get_config/0</c></seealso> or
+ <seealso marker="logger#get_handler_config-0">
+ <c>logger:get_handler_config/0,1</c></seealso> is called,
+ Logger calls <c>HModule:filter_config(Config)</c>. This function
+ must return the handler configuration with internal data
+ removed.</p>
<p>A simple handler that prints to the terminal can be implemented
as follows:</p>
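The callback list referenced above now includes changing_config/3 and filter_config/1. As a rough sketch only (hypothetical module my_term_h with a made-up depth option and an ETS table as internal data; this is not the manual's example and not part of the patch), a handler honouring the set/update semantics could look like:

    -module(my_term_h).
    -export([log/2, adding_handler/1, removing_handler/1,
             changing_config/3, filter_config/1]).

    default_hconfig() -> #{depth => 20}.

    adding_handler(Config) ->
        HConfig0 = maps:get(config, Config, #{}),
        HConfig = maps:merge(default_hconfig(), HConfig0),
        {ok, Config#{config => HConfig#{tab => ets:new(my_term_h, [public])}}}.

    removing_handler(#{config := #{tab := Tab}}) ->
        ets:delete(Tab),
        ok.

    changing_config(SetOrUpdate, #{config := OldH}, NewConfig) ->
        NewH0 = maps:get(config, NewConfig, #{}),
        Default = case SetOrUpdate of
                      set    -> default_hconfig();        %% reset unspecified keys
                      update -> maps:without([tab], OldH) %% keep old values
                  end,
        %% Internal ("read-only") data always comes from the old configuration.
        NewH = maps:merge(maps:merge(Default, NewH0), maps:with([tab], OldH)),
        {ok, NewConfig#{config => NewH}}.

    filter_config(#{config := HConfig} = Config) ->
        %% Hide internal data from logger:get_handler_config/0,1.
        Config#{config => maps:without([tab], HConfig)}.

    log(LogEvent, #{formatter := {FModule, FConfig}}) ->
        io:put_chars(FModule:format(LogEvent, FConfig)).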
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
index dfe2ab3275..d9b941a0a9 100644
--- a/lib/kernel/doc/src/logger_disk_log_h.xml
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -66,6 +66,10 @@
corresponds to the <c>name</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
</item>
<tag><c>type</c></tag>
<item>
@@ -73,6 +77,8 @@
corresponds to the <c>type</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
<p>Defaults to <c>wrap</c>.</p>
</item>
<tag><c>max_no_files</c></tag>
@@ -82,6 +88,8 @@
corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
<p>Defaults to <c>10</c>.</p>
<p>The setting has no effect on a halt log.</p>
</item>
@@ -93,6 +101,8 @@
corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
<p>Defaults to <c>1048576</c> bytes for a wrap log, and
<c>infinity</c> for a halt log.</p>
</item>
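Since file, type, max_no_files and max_no_bytes are now documented as write-once, they have to be given when the handler is added. An illustrative call (hypothetical handler id and values; not part of the patch):

    ok = logger:add_handler(my_disk_log, logger_disk_log_h,
                            #{config => #{file => "./my_disk_log",
                                          type => wrap,
                                          max_no_files => 4,
                                          max_no_bytes => 10000}}).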
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index fcd8189bae..e156f5719b 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -74,7 +74,9 @@
circular logging. Use the disk_log handler,
<seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
for this.</p>
- <p> Defaults to <c>standard_io</c>.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
+ <p>Defaults to <c>standard_io</c>.</p>
</item>
<tag><c>filesync_repeat_interval</c></tag>
<item>
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
index d1701afdaa..c37212b0f9 100644
--- a/lib/kernel/src/inet_tcp_dist.erl
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -450,7 +450,7 @@ get_tcp_address(Driver, Socket) ->
get_address_resolver(EpmdModule) ->
case erlang:function_exported(EpmdModule, address_please, 3) of
true -> {EpmdModule, address_please};
- _ -> {inet, getaddr}
+ _ -> {erl_epmd, address_please}
end.
%% ------------------------------------------------------------
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
index 752dd8d493..6762998d4f 100644
--- a/lib/kernel/src/logger.erl
+++ b/lib/kernel/src/logger.erl
@@ -43,7 +43,8 @@
get_module_level/0, get_module_level/1,
set_primary_config/1, set_primary_config/2,
set_handler_config/2, set_handler_config/3,
- update_primary_config/1, update_handler_config/2,
+ update_primary_config/1,
+ update_handler_config/2, update_handler_config/3,
update_formatter_config/2, update_formatter_config/3,
get_primary_config/0, get_handler_config/1,
get_handler_config/0, get_handler_ids/0, get_config/0,
@@ -423,6 +424,29 @@ set_handler_config(HandlerId,Config) ->
update_primary_config(Config) ->
logger_server:update_config(primary,Config).
+-spec update_handler_config(HandlerId,level,Level) -> Return when
+ HandlerId :: handler_id(),
+ Level :: level() | all | none,
+ Return :: ok | {error,term()};
+ (HandlerId,filter_default,FilterDefault) -> Return when
+ HandlerId :: handler_id(),
+ FilterDefault :: log | stop,
+ Return :: ok | {error,term()};
+ (HandlerId,filters,Filters) -> Return when
+ HandlerId :: handler_id(),
+ Filters :: [{filter_id(),filter()}],
+ Return :: ok | {error,term()};
+ (HandlerId,formatter,Formatter) -> Return when
+ HandlerId :: handler_id(),
+ Formatter :: {module(), formatter_config()},
+ Return :: ok | {error,term()};
+ (HandlerId,config,Config) -> Return when
+ HandlerId :: handler_id(),
+ Config :: term(),
+ Return :: ok | {error,term()}.
+update_handler_config(HandlerId,Key,Value) ->
+ logger_server:update_config(HandlerId,Key,Value).
+
-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when
HandlerId :: handler_id(),
Config :: handler_config().
@@ -439,7 +463,14 @@ get_primary_config() ->
HandlerId :: handler_id(),
Config :: handler_config().
get_handler_config(HandlerId) ->
- logger_config:get(?LOGGER_TABLE,HandlerId).
+ case logger_config:get(?LOGGER_TABLE,HandlerId) of
+ {ok,#{module:=Module}=Config} ->
+ {ok,try Module:filter_config(Config)
+ catch _:_ -> Config
+ end};
+ Error ->
+ Error
+ end.
-spec get_handler_config() -> [Config] when
Config :: handler_config().
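The new update_handler_config/3 spec above takes one top-level key per call. Illustrative calls matching a few of the clauses (hypothetical handler id default; the logger_formatter option is an assumption and not part of the patch):

    ok = logger:update_handler_config(default, level, debug),
    ok = logger:update_handler_config(default, filter_default, stop),
    ok = logger:update_handler_config(default, formatter,
                                      {logger_formatter, #{single_line => true}}).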
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index a8f141f135..2a81458ec8 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -33,7 +33,8 @@
terminate/2, code_change/3]).
%% logger callbacks
--export([log/2, adding_handler/1, removing_handler/1, changing_config/2]).
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
%% handler internal
-export([log_handler_info/4]).
@@ -114,9 +115,8 @@ reset(Name) ->
%%% Handler being added
adding_handler(#{id:=Name}=Config) ->
case check_config(adding, Config) of
- {ok, Config1} ->
+ {ok, #{config:=HConfig}=Config1} ->
%% create initial handler state by merging defaults with config
- HConfig = maps:get(config, Config1, #{}),
HState = maps:merge(get_init_state(), HConfig),
case logger_h_common:overload_levels_ok(HState) of
true ->
@@ -133,32 +133,40 @@ adding_handler(#{id:=Name}=Config) ->
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(OldConfig = #{id:=Name, config:=OldHConfig},
- NewConfig = #{id:=Name, config:=NewHConfig}) ->
- #{type:=Type, file:=File, max_no_files:=MaxFs,
- max_no_bytes:=MaxBytes} = OldHConfig,
- case NewHConfig of
- #{type:=Type, file:=File, max_no_files:=MaxFs,
- max_no_bytes:=MaxBytes} ->
- changing_config1(OldConfig, NewConfig);
- _ ->
- {error,{illegal_config_change,OldConfig,NewConfig}}
- end;
-changing_config(OldConfig, NewConfig) ->
- {error,{illegal_config_change,OldConfig,NewConfig}}.
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+ WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig),
+ ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
+ NewHConfig0 = maps:get(config, NewConfig, #{}),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
-changing_config1(OldConfig=#{config:=OldHConfig}, NewConfig) ->
+ %% Allow (accidentally) included read-only fields - just overwrite them
+ NewHConfig = maps:merge(maps:merge(Default,NewHConfig0),ReadOnly),
+
+ %% But fail if write-once fields are changed
+ case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of
+ WriteOnce ->
+ changing_config1(maps:get(handler_pid,OldHConfig),
+ OldConfig,
+ NewConfig#{config=>NewHConfig});
+ Other ->
+ {Old,New} = logger_server:diff_maps(WriteOnce,Other),
+ {error,{illegal_config_change,#{config=>Old},#{config=>New}}}
+ end.
+
+changing_config1(HPid, OldConfig, NewConfig) ->
case check_config(changing, NewConfig) of
- {ok,NewConfig1 = #{config:=NewHConfig}} ->
- #{handler_pid:=HPid,
- mode_tab:=ModeTab} = OldHConfig,
- NewHConfig1 = NewHConfig#{handler_pid=>HPid,
- mode_tab=>ModeTab},
- NewConfig2 = NewConfig1#{config=>NewHConfig1},
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig2},
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
?DEFAULT_CALL_TIMEOUT) of
- ok -> {ok,NewConfig2};
- HError -> HError
+ ok -> Result;
+ Error -> Error
catch
_:{timeout,_} -> {error,handler_busy}
end;
@@ -168,10 +176,12 @@ changing_config1(OldConfig=#{config:=OldHConfig}, NewConfig) ->
check_config(adding, #{id:=Name}=Config) ->
%% merge handler specific config data
- HConfig = merge_default_logopts(Name, maps:get(config, Config, #{})),
- case check_h_config(maps:to_list(HConfig)) of
+ HConfig1 = maps:get(config, Config, #{}),
+ HConfig2 = maps:merge(get_default_config(), HConfig1),
+ HConfig3 = merge_default_logopts(Name, HConfig2),
+ case check_h_config(maps:to_list(HConfig3)) of
ok ->
- {ok,Config#{config=>HConfig}};
+ {ok,Config#{config=>HConfig3}};
Error ->
Error
end;
@@ -238,6 +248,11 @@ log(LogEvent, Config = #{id := Name,
Bin = logger_h_common:log_to_binary(LogEvent, Config),
logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
@@ -438,7 +453,7 @@ code_change(_OldVsn, State, _Extra) ->
%%%-----------------------------------------------------------------
%%%
-get_init_state() ->
+get_default_config() ->
#{sync_mode_qlen => ?SYNC_MODE_QLEN,
drop_mode_qlen => ?DROP_MODE_QLEN,
flush_qlen => ?FLUSH_QLEN,
@@ -449,10 +464,12 @@ get_init_state() ->
overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN,
filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+get_init_state() ->
+ #{dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
+
%%%-----------------------------------------------------------------
%%% Add a disk_log handler to the logger.
%%% This starts a dedicated handler process which should always
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 38ac7d8ffc..94c640cb92 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -306,8 +306,11 @@ stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
exit(HandlerPid, kill)
end,
case ConfigResult of
- {ok,#{module:=HMod}=HConfig} when is_integer(RestartAfter) ->
+ {ok,#{module:=HMod}=HConfig0} when is_integer(RestartAfter) ->
_ = logger:remove_handler(Name),
+ HConfig = try HMod:filter_config(HConfig0)
+ catch _:_ -> HConfig0
+ end,
_ = timer:apply_after(RestartAfter, logger, add_handler,
[Name,HMod,HConfig]);
{ok,_} ->
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
index a1d40f1123..b7735dbcf7 100644
--- a/lib/kernel/src/logger_server.erl
+++ b/lib/kernel/src/logger_server.erl
@@ -27,9 +27,13 @@
add_filter/2, remove_filter/2,
set_module_level/2, unset_module_level/0,
unset_module_level/1, cache_module_level/1,
- set_config/2, set_config/3, update_config/2,
+ set_config/2, set_config/3,
+ update_config/2, update_config/3,
update_formatter_config/2]).
+%% Helper
+-export([diff_maps/2]).
+
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2]).
@@ -105,12 +109,25 @@ cache_module_level(Module) ->
gen_server:cast(?SERVER,{cache_module_level,Module}).
set_config(Owner,Key,Value) ->
- update_config(Owner,#{Key=>Value}).
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,set,Owner,Key,Value});
+ Error ->
+ Error
+ end.
set_config(Owner,Config) ->
case sanity_check(Owner,Config) of
ok ->
- call({set_config,Owner,Config});
+ call({change_config,set,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,update,Owner,Key,Value});
Error ->
Error
end.
@@ -118,7 +135,7 @@ set_config(Owner,Config) ->
update_config(Owner, Config) ->
case sanity_check(Owner,Config) of
ok ->
- call({update_config,Owner,Config});
+ call({change_config,update,Owner,Config});
Error ->
Error
end.
@@ -204,46 +221,72 @@ handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
Reply = do_remove_filter(Tid,Id,FilterId),
{reply,Reply,State};
-handle_call({update_config,primary,NewConfig}, _From, #state{tid=Tid}=State) ->
+handle_call({change_config,SetOrUpd,primary,Config0}, _From,
+ #state{tid=Tid}=State) ->
+ {ok,#{handlers:=Handlers}=OldConfig} = logger_config:get(Tid,primary),
+ Default =
+ case SetOrUpd of
+ set -> default_config(primary);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
+ {reply,Reply,State};
+handle_call({change_config,_SetOrUpd,primary,Key,Value}, _From,
+ #state{tid=Tid}=State) ->
{ok,OldConfig} = logger_config:get(Tid,primary),
- Config = maps:merge(OldConfig,NewConfig),
- {reply,logger_config:set(Tid,primary,Config),State};
-handle_call({update_config,HandlerId,NewConfig}, From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set(Tid,primary,OldConfig#{Key=>Value}),
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,HandlerId,Config0}, From,
+ #state{tid=Tid}=State) ->
case logger_config:get(Tid,HandlerId) of
{ok,#{module:=Module}=OldConfig} ->
- Config = maps:merge(OldConfig,NewConfig),
- call_h_async(
- fun() ->
- call_h(Module,changing_config,[OldConfig,Config],
- {ok,Config})
- end,
- fun({ok,Config1}) ->
- logger_config:set(Tid,HandlerId,Config1);
- (Error) ->
- Error
- end,From,State);
- Error ->
- {reply,Error,State}
+ Default =
+ case SetOrUpd of
+ set -> default_config(HandlerId,Module);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
end;
-handle_call({set_config,primary,Config0}, _From, #state{tid=Tid}=State) ->
- Config = maps:merge(default_config(primary),Config0),
- {ok,#{handlers:=Handlers}} = logger_config:get(Tid,primary),
- Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
- {reply,Reply,State};
-handle_call({set_config,HandlerId,Config0}, From, #state{tid=Tid}=State) ->
+handle_call({change_config,SetOrUpd,HandlerId,Key,Value}, From,
+ #state{tid=Tid}=State) ->
case logger_config:get(Tid,HandlerId) of
{ok,#{module:=Module}=OldConfig} ->
- Config = maps:merge(default_config(HandlerId,Module),Config0),
- call_h_async(
- fun() ->
- call_h(Module,changing_config,[OldConfig,Config],
- {ok,Config})
- end,
- fun({ok,Config1}) ->
- logger_config:set(Tid,HandlerId,Config1);
- (Error) ->
- Error
- end,From,State);
+ Config = OldConfig#{Key=>Value},
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
_ ->
{reply,{error,{not_found,HandlerId}},State}
end;
@@ -320,7 +363,7 @@ call(Request) ->
true when
Action == add_handler; Action == remove_handler;
Action == add_filter; Action == remove_filter;
- Action == update_config; Action == set_config ->
+ Action == change_config ->
{error,{attempting_syncronous_call_to_self,Request}};
_ ->
gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
@@ -458,6 +501,15 @@ check_formatter({Mod,Config}) ->
check_formatter(Formatter) ->
throw({invalid_formatter,Formatter}).
+%% When changing configuration for a handler, the id and module fields
+%% cannot be changed.
+check_config_change(#{id:=Id,module:=Module},#{id:=Id,module:=Module}) ->
+ ok;
+check_config_change(OldConfig,NewConfig) ->
+ {Old,New} = logger_server:diff_maps(maps:with([id,module],OldConfig),
+ maps:with([id,module],NewConfig)),
+ {error,{illegal_config_change,Old,New}}.
+
call_h(Module, Function, Args, DefRet) ->
%% Not calling code:ensure_loaded + erlang:function_exported here,
%% since in some rare terminal cases, the code_server might not
@@ -466,6 +518,11 @@ call_h(Module, Function, Args, DefRet) ->
catch
C:R:S ->
case {C,R,S} of
+ {error,undef,[{Module,Function=changing_config,Args,_}|_]}
+ when length(Args)=:=3 ->
+ %% Backward compatibility: fall back to changing_config/2
+ %% if changing_config/3 is not exported.
+ call_h(Module, Function, tl(Args), DefRet);
{error,undef,[{Module,Function,Args,_}|_]} ->
DefRet;
_ ->
@@ -525,3 +582,14 @@ call_h_reply(Unexpected,State) ->
{process,?SERVER},
{message,Unexpected}]),
{noreply,State}.
+
+%% Return two maps containing only the fields that differ.
+diff_maps(M1,M2) ->
+ diffs(lists:sort(maps:to_list(M1)),lists:sort(maps:to_list(M2)),#{},#{}).
+
+diffs([H|T1],[H|T2],D1,D2) ->
+ diffs(T1,T2,D1,D2);
+diffs([{K,V1}|T1],[{K,V2}|T2],D1,D2) ->
+ diffs(T1,T2,D1#{K=>V1},D2#{K=>V2});
+diffs([],[],D1,D2) ->
+ {D1,D2}.
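diff_maps/2 assumes both maps have the same key set (which the callers guarantee by building both sides with maps:with/2 over the same keys) and returns only the entries whose values differ. An illustrative shell evaluation (not part of the patch):

    1> logger_server:diff_maps(#{id => h1, module => m1},
                               #{id => h1, module => m2}).
    {#{module => m1},#{module => m2}}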
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 66fa6b6ab6..42e0f5caf4 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -35,7 +35,8 @@
terminate/2, code_change/3]).
%% logger callbacks
--export([log/2, adding_handler/1, removing_handler/1, changing_config/2]).
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
%% handler internal
-export([log_handler_info/4]).
@@ -116,9 +117,8 @@ reset(Name) ->
%%% Handler being added
adding_handler(#{id:=Name}=Config) ->
case check_config(adding, Config) of
- {ok, Config1} ->
+ {ok, #{config:=HConfig}=Config1} ->
%% create initial handler state by merging defaults with config
- HConfig = maps:get(config, Config1, #{}),
HState = maps:merge(get_init_state(), HConfig),
case logger_h_common:overload_levels_ok(HState) of
true ->
@@ -135,22 +135,31 @@ adding_handler(#{id:=Name}=Config) ->
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(OldConfig=#{id:=Name, config:=OldHConfig},
- NewConfig=#{id:=Name}) ->
- #{type:=Type, handler_pid:=HPid, mode_tab:=ModeTab} = OldHConfig,
- NewHConfig = maps:get(config, NewConfig, #{}),
- case maps:get(type, NewHConfig, Type) of
- Type ->
- NewHConfig1 = NewHConfig#{type=>Type,
- handler_pid=>HPid,
- mode_tab=>ModeTab},
- changing_config1(HPid, OldConfig,
- NewConfig#{config=>NewHConfig1});
- _ ->
- {error,{illegal_config_change,OldConfig,NewConfig}}
- end;
-changing_config(OldConfig, NewConfig) ->
- {error,{illegal_config_change,OldConfig,NewConfig}}.
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+ WriteOnce = maps:with([type],OldHConfig),
+ ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
+ NewHConfig0 = maps:get(config, NewConfig, #{}),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
+
+ %% Allow (accidentally) included read-only fields - just overwrite them
+ NewHConfig = maps:merge(maps:merge(Default, NewHConfig0),ReadOnly),
+
+ %% But fail if write-once fields are changed
+ case maps:with([type],NewHConfig) of
+ WriteOnce ->
+ changing_config1(maps:get(handler_pid,OldHConfig),
+ OldConfig,
+ NewConfig#{config=>NewHConfig});
+ Other ->
+ {error,{illegal_config_change,#{config=>WriteOnce},#{config=>Other}}}
+ end.
changing_config1(HPid, OldConfig, NewConfig) ->
case check_config(changing, NewConfig) of
@@ -169,8 +178,7 @@ changing_config1(HPid, OldConfig, NewConfig) ->
check_config(adding, Config) ->
%% Merge in defaults on handler level
HConfig0 = maps:get(config, Config, #{}),
- HConfig = maps:merge(#{type => standard_io},
- HConfig0),
+ HConfig = maps:merge(get_default_config(),HConfig0),
case check_h_config(maps:to_list(HConfig)) of
ok ->
{ok,Config#{config=>HConfig}};
@@ -223,6 +231,11 @@ log(LogEvent, Config = #{id := Name,
Bin = logger_h_common:log_to_binary(LogEvent, Config),
logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
@@ -428,8 +441,9 @@ code_change(_OldVsn, State, _Extra) ->
%%%-----------------------------------------------------------------
%%%
-get_init_state() ->
- #{sync_mode_qlen => ?SYNC_MODE_QLEN,
+get_default_config() ->
+ #{type => standard_io,
+ sync_mode_qlen => ?SYNC_MODE_QLEN,
drop_mode_qlen => ?DROP_MODE_QLEN,
flush_qlen => ?FLUSH_QLEN,
burst_limit_enable => ?BURST_LIMIT_ENABLE,
@@ -439,10 +453,12 @@ get_init_state() ->
overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN,
filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+get_init_state() ->
+ #{file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
+
%%%-----------------------------------------------------------------
%%% Add a standard handler to the logger.
%%% This starts a dedicated handler process which should always
@@ -825,4 +841,3 @@ sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
Error
end.
-
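For logger_std_h the only write-once field is type, while handler_pid and mode_tab are read-only. A hedged shell-style sketch of the resulting behaviour (hypothetical handler h1 added with type {file,"h1.log"}; not part of the patch):

    %% Unspecified fields are reset to their defaults, but 'type' is kept:
    ok = logger:set_handler_config(h1, config, #{sync_mode_qlen => 1}),
    %% Changing the write-once field fails, and only that field is reported:
    {error, {illegal_config_change,
             #{config := #{type := {file,"h1.log"}}},
             #{config := #{type := standard_io}}}} =
        logger:set_handler_config(h1, config, #{type => standard_io}).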
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 194522c009..04c0c48e3a 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -52,7 +52,8 @@
several_accepts_in_one_go/1, accept_system_limit/1,
active_once_closed/1, send_timeout/1, send_timeout_active/1,
otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
- wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1]).
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1,
+ otp_12242/1]).
%% Internal exports.
-export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1,
@@ -95,7 +96,8 @@ all() ->
killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit,
active_once_closed, send_timeout, send_timeout_active, otp_7731,
wrapping_oct,
- zombie_sockets, otp_7816, otp_8102, otp_9389].
+ zombie_sockets, otp_7816, otp_8102, otp_9389,
+ otp_12242].
groups() ->
[].
@@ -3284,3 +3286,143 @@ otp_13939(Config) when is_list(Config) ->
exit(Pid, normal),
ct:fail("Server process blocked on send.")
end.
+
+otp_12242(Config) when is_list(Config) ->
+ case os:type() of
+ {win32,_} ->
+ %% Even if we set sndbuf and recbuf to small sizes,
+ %% Windows either happily accepts sending gigabytes of data
+ %% in no time, so the second send below that is supposed
+ %% to time out just succeeds, or the first send that
+ %% is supposed to fill the inet_drv I/O queue and
+ %% wait until more data can be sent
+ %% instead sends all data but suffers a send
+ %% failure that closes the socket.
+ {skipped,backpressure_broken_on_win32};
+ _ ->
+ %% Find the IPv4 address of an up and running interface
+ %% that is neither loopback nor pointtopoint
+ {ok,IFList} = inet:getifaddrs(),
+ ct:pal("IFList ~p~n", [IFList]),
+ case
+ lists:flatten(
+ [lists:filtermap(
+ fun ({addr,Addr}) when tuple_size(Addr) =:= 4 ->
+ {true,Addr};
+ (_) ->
+ false
+ end, Opts)
+ || {_,Opts} <- IFList,
+ case lists:keyfind(flags, 1, Opts) of
+ {_,Flags} ->
+ lists:member(up, Flags)
+ andalso
+ lists:member(running, Flags)
+ andalso
+ not lists:member(loopback, Flags)
+ andalso
+ not lists:member(pointtopoint, Flags);
+ false ->
+ false
+ end])
+ of
+ [Addr|_] ->
+ otp_12242(Addr);
+ Other ->
+ {skipped,{no_external_address,Other}}
+ end
+ end;
+%%
+otp_12242(Addr) when tuple_size(Addr) =:= 4 ->
+ ct:timetrap(30000),
+ ct:pal("Using address ~p~n", [Addr]),
+ Bufsize = 16 * 1024,
+ Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface
+ Blob = binary:copy(<<$x>>, Datasize),
+ LOpts =
+ [{backlog,4},{reuseaddr,true},{ip,Addr},
+ binary,{active,false},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ COpts =
+ [binary,{active,false},{ip,Addr},
+ {linger,{true,1}}, % 1 s
+ {send_timeout,500},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,ListenerNode} =
+ test_server:start_node(
+ ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]),
+ Tester = self(),
+ Listener =
+ spawn(
+ ListenerNode,
+ fun () ->
+ {ok,L} = gen_tcp:listen(0, LOpts),
+ {ok,LPort} = inet:port(L),
+ Tester ! {self(),port,LPort},
+ {ok,A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(L),
+ receive
+ {Tester,stop} ->
+ ok = gen_tcp:close(A)
+ end
+ end),
+ ListenerMref = monitor(process, Listener),
+ LPort = receive {Listener,port,P} -> P end,
+ {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity),
+ {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]),
+ ct:pal("ReadCOpts ~p~n", [ReadCOpts]),
+ %%
+ %% Fill the buffers
+ ct:pal("Sending ~p bytes~n", [Datasize]),
+ ok = gen_tcp:send(C, Blob),
+ ct:pal("Sent ~p bytes~n", [Datasize]),
+ %% Spawn the Closer,
+ %% try to ensure that the close call is in progress
+ %% before the owner proceeds with sending
+ Owner = self(),
+ {_Closer,CloserMref} =
+ spawn_opt(
+ fun () ->
+ Owner ! {tref, erlang:start_timer(50, Owner, closing)},
+ ct:pal("Calling gen_tcp:close(C)~n"),
+ try gen_tcp:close(C) of
+ Result ->
+ ct:pal("gen_tcp:close(C) -> ~p~n", [Result]),
+ ok = Result
+ catch
+ Class:Reason:Stacktrace ->
+ ct:pal(
+ "gen_tcp:close(C) >< ~p:~p~n ~p~n",
+ [Class,Reason,Stacktrace]),
+ erlang:raise(Class, Reason, Stacktrace)
+ end
+ end, [link,monitor]),
+ receive
+ {tref,Tref} ->
+ receive {timeout,Tref,_} -> ok end,
+ ct:pal("Sending ~p bytes again~n", [Datasize]),
+ %% Now should the close be in progress...
+ %% All buffers are full, remote end is not reading,
+ %% and the send timeout is 1 s so this will timeout:
+ {error,timeout} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes again timed out~n", [Datasize]),
+ ok = inet:setopts(C, [{send_timeout,10000}]),
+ %% There is a hidden timeout here. Port close is sampled
+ %% every 5 s by prim_inet:send_recv_reply.
+ %% Linger is 1 s so the Closer will finish this send:
+ ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]),
+ {error,closed} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes with 10 s timeout was closed~n",
+ [Datasize]),
+ normal = wait(CloserMref),
+ ct:pal("The Closer has exited~n"),
+ Listener ! {Tester,stop},
+ receive {'DOWN',ListenerMref,_,_,_} -> ok end,
+ ct:pal("The Listener has exited~n"),
+ test_server:stop_node(ListenerNode),
+ ok
+ end.
+
+wait(Mref) ->
+ receive {'DOWN',Mref,_,_,Reason} -> Reason end.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index b7ccba8e70..d831d0d108 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -246,6 +246,18 @@ change_config(_Config) ->
{ok,C4} = logger:get_handler_config(h1),
C4 = C3#{custom:=new_custom},
+ %% Change handler config: Id and module can not be changed
+ {error,{illegal_config_change,Old,New}} =
+ logger:set_handler_config(h1,id,newid),
+ %% Check that only the faulty field is included in return
+ [{id,h1}] = maps:to_list(Old),
+ [{id,newid}] = maps:to_list(New),
+ %% Check that both fields are included when both are changed
+ {error,{illegal_config_change,
+ #{id:=h1,module:=?MODULE},
+ #{id:=newid,module:=newmodule}}} =
+ logger:set_handler_config(h1,#{id=>newid,module=>newmodule}),
+
%% Change primary config: Single key
PConfig0 = logger:get_primary_config(),
ok = logger:set_primary_config(level,warning),
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index a815db14e9..905c2c52c5 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -92,6 +92,7 @@ all() ->
disk_log_opts,
default_formatter,
logging,
+ filter_config,
errors,
formatter_fail,
config_fail,
@@ -302,6 +303,20 @@ logging(cleanup, _Config) ->
Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
remove_and_stop(Name).
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
PrivDir = ?config(priv_dir,Config),
Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
@@ -316,13 +331,29 @@ errors(Config) ->
%%! TODO:
%%! Check how bad log_opts are handled!
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,
- config,
- #{file=>LogFile1,
- type=>halt}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,id,new),
+ {error,{illegal_config_change,
+ #{config:=#{type:=wrap}},
+ #{config:=#{type:=halt}}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{type=>halt,
+ file=>LogFile1}),
+
+ {error,{illegal_config_change,
+ #{config:=#{file:=LogFile1}},
+ #{config:=#{file:="newfilename"}}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{file=>"newfilename"}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but they won't take effect
+ {ok,C} = logger:get_handler_config(Name1),
+ ok = logger:set_handler_config(Name1,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(Name1),
+
ok = logger:remove_handler(Name1),
{error,{not_found,Name1}} = logger:remove_handler(Name1),
@@ -403,21 +434,21 @@ config_fail(_Config) ->
formatter=>{?MODULE,self()}}),
%% can't change the disk log options for a log already in use
{error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,config,
- #{max_no_files=>2}),
+ logger:update_handler_config(?MODULE,config,
+ #{max_no_files=>2}),
%% can't change name of an existing handler
{error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
+ logger:update_handler_config(?MODULE,id,bad),
%% incorrect values of OP params
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
{error,{invalid_levels,_}} =
- logger:set_handler_config(?MODULE,config,
- HConfig#{sync_mode_qlen=>100,
- flush_qlen=>99}),
+ logger:update_handler_config(?MODULE,config,
+ HConfig#{sync_mode_qlen=>100,
+ flush_qlen=>99}),
%% invalid name of config parameter
{error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
- logger:set_handler_config(?MODULE, config,
- HConfig#{filesync_rep_int => 2000}),
+ logger:update_handler_config(?MODULE, config,
+ HConfig#{filesync_rep_int => 2000}),
ok.
config_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
@@ -459,10 +490,26 @@ reconfig(Config) ->
log_opts := #{type := ?DISK_LOG_TYPE,
max_no_files := ?DISK_LOG_MAX_NO_FILES,
max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
- file := _DiskLogFile}} =
+ file := DiskLogFile}} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ file := DiskLogFile,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ type := wrap} = HConfig0}} =
+ logger:get_handler_config(?MODULE),
- {ok,#{config := HConfig0}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig0#{sync_mode_qlen => 1,
drop_mode_qlen => 2,
flush_qlen => 3,
@@ -488,6 +535,29 @@ reconfig(Config) ->
overload_kill_restart_after := infinity,
filesync_repeat_interval := no_repeat} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = HConfig0#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = HConfig0#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
ok = logger:remove_handler(?MODULE),
@@ -507,6 +577,43 @@ reconfig(Config) ->
max_no_bytes := 1024,
file := File}} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}=HaltHConfig} = Config2} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, level, notice),
+ {ok,C5} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = Config2#{level => notice},
+
+ ok = logger:set_handler_config(?MODULE, level, info),
+ {ok,C6} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = Config2#{level => info},
+
+ %% You are not allowed to actively set the write-once fields
+ %% (type, max_no_files, max_no_bytes, file) at runtime.
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+ {ok,C7} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = C6,
+
+ %% ... but if you don't specify the write-once fields, then
+ %% set_handler_config shall NOT reset them to their default values
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+ ct:log("C8: ~p",[C8]),
+ C8 = HaltHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
@@ -536,7 +643,7 @@ sync(Config) ->
%% a disk_log_sync is still performed when handler goes idle
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig1),
+ ok = logger:update_handler_config(?MODULE, config, HConfig1),
no_repeat = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
@@ -569,13 +676,13 @@ sync(Config) ->
[OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
- ok = logger:set_handler_config(?MODULE, config, HConfig2),
+ ok = logger:update_handler_config(?MODULE, config, HConfig2),
SyncInt = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
timer:sleep(WaitT),
HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig3),
+ ok = logger:update_handler_config(?MODULE, config, HConfig3),
check_tracer(100),
ok.
sync(cleanup,_Config) ->
@@ -797,7 +904,7 @@ sync_failure(Config) ->
LogOpts = maps:get(log_opts, HState),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
@@ -872,7 +979,7 @@ op_switch_to_sync(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
NumOfReqs = Lines,
@@ -897,7 +1004,7 @@ op_switch_to_drop(Config) ->
drop_mode_qlen => 2,
flush_qlen => Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler either gets
%% the requests in a slow enough pace so that dropping
%% never occurs. Therefore, lets generate a number of
@@ -943,7 +1050,7 @@ op_switch_to_flush(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -985,7 +1092,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1005,7 +1112,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1026,7 +1133,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -1046,7 +1153,7 @@ kill_disabled(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1068,7 +1175,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1105,7 +1212,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1139,7 +1246,7 @@ restart_after(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
@@ -1161,7 +1268,7 @@ restart_after(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
@@ -1194,7 +1301,7 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
@@ -1227,9 +1334,9 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
Result =
case Req of
change_config ->
- logger:set_handler_config(HName, logger_disk_log_h,
- #{overload_kill_enable =>
- false});
+ logger:update_handler_config(HName, logger_disk_log_h,
+ #{overload_kill_enable =>
+ false});
Func ->
logger_disk_log_h:Func(HName)
end,
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index 3426567bbf..b6a09f4980 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -108,6 +108,7 @@ all() ->
add_remove_instance_file1,
add_remove_instance_file2,
default_formatter,
+ filter_config,
errors,
formatter_fail,
config_fail,
@@ -204,6 +205,20 @@ default_formatter(_Config) ->
match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]),
ok.
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_std_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir,?FUNCTION_NAME),
@@ -319,11 +334,10 @@ config_fail(_Config) ->
ok = logger:add_handler(?MODULE,logger_std_h,
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{illegal_config_change,_,_}} =
+ {error,{illegal_config_change,#{config:=#{type:=_}},#{config:=#{type:=_}}}} =
logger:set_handler_config(?MODULE,config,
#{type=>{file,"file"}}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
+
{error,{invalid_levels,_}} =
logger:set_handler_config(?MODULE,config,
#{sync_mode_qlen=>100,
@@ -331,6 +345,15 @@ config_fail(_Config) ->
{error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
logger:set_handler_config(?MODULE, config,
#{filesync_rep_int => 2000}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but they won't take effect
+ {ok,C} = logger:get_handler_config(?MODULE),
+ ok = logger:set_handler_config(?MODULE,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(?MODULE),
+
ok.
config_fail(cleanup,_Config) ->
@@ -457,9 +480,26 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = DefaultInfo =
logger_std_h:info(?MODULE),
+ {ok,
+ #{config:=
+ #{type := standard_io,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ DefaultHConf}}
+ = logger:get_handler_config(?MODULE),
+
ok = logger:set_handler_config(?MODULE, config,
#{sync_mode_qlen => 1,
drop_mode_qlen => 2,
@@ -485,7 +525,77 @@ reconfig(Config) ->
overload_kill_qlen := 100000,
overload_kill_mem_size := 10000000,
overload_kill_restart_after := infinity,
- filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE),
+ filesync_repeat_interval := no_repeat} = Info = logger_std_h:info(?MODULE),
+
+ {ok,#{config :=
+ #{type := standard_io,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = HConf}} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConf#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = DefaultHConf#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = DefaultHConf#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = DefaultHConf#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,File}},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:update_handler_config(?MODULE,config,
+ #{filesync_repeat_interval=>FSI+2000}),
+ {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+ %% You are not allowed to actively set 'type' at runtime, since
+ %% this is a write-once field.
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+ {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = C5,
+
+ %% ... but if you don't specify 'type', then set_handler_config shall
+ %% NOT reset it to its default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = FileHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
@@ -561,8 +671,8 @@ sync(Config) ->
%% check that if there's no repeated filesync active,
%% a filesync is still performed when handler goes idle
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
%% The following timer is to make sure the time from last log
%% ("second") to next ("third") is long enough, so the a flush is
@@ -592,12 +702,12 @@ sync(Config) ->
start_tracer([{logger_std_h,handle_cast,2}],
[OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => SyncInt}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
timer:sleep(WaitT),
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
check_tracer(100),
ok.
sync(cleanup, _Config) ->
@@ -652,7 +762,7 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]),
@@ -718,7 +828,7 @@ op_switch_to_sync_file(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% TRecvPid = start_op_trace(),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
@@ -747,7 +857,7 @@ op_switch_to_sync_tty(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
ok.
op_switch_to_sync_tty(cleanup, _Config) ->
@@ -770,7 +880,7 @@ op_switch_to_drop_file(Config) ->
flush_qlen =>
Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler gets the
%% requests at a slow enough pace so that dropping
%% never occurs. Therefore, let's generate a number of
@@ -807,7 +917,7 @@ op_switch_to_drop_tty(Config) ->
flush_qlen =>
Procs*NumOfReqs+1,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
ok.
op_switch_to_drop_tty(cleanup, _Config) ->
@@ -832,7 +942,7 @@ op_switch_to_flush_file(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -879,7 +989,7 @@ op_switch_to_flush_tty(Config) ->
drop_mode_qlen => 100,
flush_qlen => 100,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1000,
Procs = 100,
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
@@ -895,7 +1005,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -915,7 +1025,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -936,7 +1046,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -956,7 +1066,7 @@ kill_disabled(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -977,7 +1087,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1011,7 +1121,7 @@ qlen_kill_std(_Config) ->
%% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
%% Log = filename:join(Dir, File),
%% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
- %% ok = rpc:call(Node, logger, set_handler_config,
+ %% ok = rpc:call(Node, logger, update_handler_config,
%% [?STANDARD_HANDLER, config,
%% #{overload_kill_enable=>true,
%% overload_kill_qlen=>10,
@@ -1028,7 +1138,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1067,7 +1177,7 @@ restart_after(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
@@ -1082,14 +1192,15 @@ restart_after(Config) ->
ct:pal("Handler state = ~p", [Info1]),
ct:fail("Handler not dead! It should not have survived this!")
end,
-
+
{Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+
NewHConfig2 =
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
@@ -1123,7 +1234,7 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
@@ -1155,9 +1266,9 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
Result =
case Req of
change_config ->
- logger:set_handler_config(HName, config,
- #{overload_kill_enable =>
- false});
+ logger:update_handler_config(HName, config,
+ #{overload_kill_enable =>
+ false});
Func ->
logger_std_h:Func(HName)
end,
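
The pattern in the hunks above is the same throughout this suite: logger:set_handler_config/3 is replaced by logger:update_handler_config/3 wherever the intent is to adjust a single value. The reconfig test shows why: set_handler_config/3 resets every handler-specific key that is not mentioned back to its default (write-once keys such as 'type' are kept), whereas update_handler_config/3 merges the given map into the existing config. A minimal sketch, assuming a logger_std_h handler registered under the hypothetical id my_h:

    %% update/3 merges: only sync_mode_qlen changes.
    ok = logger:update_handler_config(my_h, config, #{sync_mode_qlen => 1}),

    %% set/3 replaces: drop_mode_qlen is set, but sync_mode_qlen (and the
    %% other overload-protection keys) fall back to their defaults, while
    %% write-once keys such as 'type' keep their current values.
    ok = logger:set_handler_config(my_h, config, #{drop_mode_qlen => 100}),
    {ok, #{config := C}} = logger:get_handler_config(my_h).
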
diff --git a/lib/observer/src/Makefile b/lib/observer/src/Makefile
index ff2bcbdb99..f9f239db37 100644
--- a/lib/observer/src/Makefile
+++ b/lib/observer/src/Makefile
@@ -50,6 +50,7 @@ MODULES= \
cdv_mem_cb \
cdv_mod_cb \
cdv_multi_wx \
+ cdv_persistent_cb \
cdv_port_cb \
cdv_proc_cb \
cdv_sched_cb \
diff --git a/lib/observer/src/cdv_html_wx.erl b/lib/observer/src/cdv_html_wx.erl
index d9efa7fc2f..ffef83227c 100644
--- a/lib/observer/src/cdv_html_wx.erl
+++ b/lib/observer/src/cdv_html_wx.erl
@@ -33,13 +33,17 @@
{panel,
app, %% which tool is the user
expand_table,
- expand_wins=[]}).
+ expand_wins=[],
+ delayed_fetch,
+ trunc_warn=[]}).
start_link(ParentWin, Info) ->
wx_object:start_link(?MODULE, [ParentWin, Info], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+init([ParentWin, Callback]) when is_atom(Callback) ->
+ init(ParentWin, Callback);
init([ParentWin, {App, Fun}]) when is_function(Fun) ->
init([ParentWin, {App, Fun()}]);
init([ParentWin, {expand,HtmlText,Tab}]) ->
@@ -60,9 +64,29 @@ init(ParentWin, HtmlText, Tab, App) ->
wx_misc:endBusyCursor(),
{HtmlWin, #state{panel=HtmlWin,expand_table=Tab,app=App}}.
+init(ParentWin, Callback) ->
+ {HtmlWin, State} = init(ParentWin, "", undefined, cdv),
+ {HtmlWin, State#state{delayed_fetch=Callback}}.
+
%%%%%%%%%%%%%%%%%%%%%%% Callbacks %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+handle_info(active, #state{panel=HtmlWin,delayed_fetch=Callback}=State)
+ when Callback=/=undefined ->
+ observer_lib:display_progress_dialog(HtmlWin,
+ "Crashdump Viewer",
+ "Reading data"),
+ {{expand,HtmlText,Tab},TW} = Callback:get_info(),
+ observer_lib:sync_destroy_progress_dialog(),
+ wx_misc:beginBusyCursor(),
+ wxHtmlWindow:setPage(HtmlWin,HtmlText),
+ cdv_wx:set_status(TW),
+ wx_misc:endBusyCursor(),
+ {noreply, State#state{expand_table=Tab,
+ delayed_fetch=undefined,
+ trunc_warn=TW}};
+
handle_info(active, State) ->
+ cdv_wx:set_status(State#state.trunc_warn),
{noreply, State};
handle_info(Info, State) ->
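
The new delayed_fetch field defers data loading until the tab is actually shown: when cdv_html_wx is started with a callback module (an atom), init/1 renders an empty page, and the first handle_info(active, ...) displays a progress dialog, calls Callback:get_info() and fills the page with the result. A hedged sketch of the contract such a callback is expected to fulfil, derived from the code above with made-up names:

    -module(my_cdv_page_cb).                       %% hypothetical callback module
    -export([get_info/0]).

    get_info() ->
        Tab = ets:new(my_expand, [set, public]),   %% table for expandable links
        HtmlText = "<html><body>No data</body></html>",
        TruncWarning = [],                         %% shown via cdv_wx:set_status/1
        {{expand, HtmlText, Tab}, TruncWarning}.
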
diff --git a/lib/observer/src/cdv_persistent_cb.erl b/lib/observer/src/cdv_persistent_cb.erl
new file mode 100644
index 0000000000..d5da18f7fc
--- /dev/null
+++ b/lib/observer/src/cdv_persistent_cb.erl
@@ -0,0 +1,32 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+
+-module(cdv_persistent_cb).
+
+-export([get_info/0]).
+
+-include_lib("wx/include/wx.hrl").
+
+get_info() ->
+ Tab = ets:new(pt_expand,[set,public]),
+ {ok,PT,TW} = crashdump_viewer:persistent_terms(),
+ {{expand,
+ observer_html_lib:expandable_term("Persistent Terms",PT,Tab),
+ Tab},
+ TW}.
diff --git a/lib/observer/src/cdv_wx.erl b/lib/observer/src/cdv_wx.erl
index 78a897111c..1e9cef8952 100644
--- a/lib/observer/src/cdv_wx.erl
+++ b/lib/observer/src/cdv_wx.erl
@@ -51,6 +51,7 @@
-define(DIST_STR, "Nodes").
-define(MOD_STR, "Modules").
-define(MEM_STR, "Memory").
+-define(PERSISTENT_STR, "Persistent Terms").
-define(INT_STR, "Internal Tables").
%% Records
@@ -74,6 +75,7 @@
dist_panel,
mod_panel,
mem_panel,
+ persistent_panel,
int_panel,
active_tab
}).
@@ -193,6 +195,10 @@ setup(#state{frame=Frame, notebook=Notebook}=State) ->
%% Memory Panel
MemPanel = add_page(Notebook, ?MEM_STR, cdv_multi_wx, cdv_mem_cb),
+ %% Persistent Terms Panel
+ PersistentPanel = add_page(Notebook, ?PERSISTENT_STR,
+ cdv_html_wx, cdv_persistent_cb),
+
%% Memory Panel
IntPanel = add_page(Notebook, ?INT_STR, cdv_multi_wx, cdv_int_tab_cb),
@@ -215,6 +221,7 @@ setup(#state{frame=Frame, notebook=Notebook}=State) ->
dist_panel = DistPanel,
mod_panel = ModPanel,
mem_panel = MemPanel,
+ persistent_panel = PersistentPanel,
int_panel = IntPanel,
active_tab = GenPid
}}.
@@ -250,6 +257,7 @@ handle_event(#wx{id = ?wxID_OPEN,
State#state.dist_panel,
State#state.mod_panel,
State#state.mem_panel,
+ State#state.persistent_panel,
State#state.int_panel],
_ = [wx_object:call(Panel,new_dump) || Panel<-Panels],
wxNotebook:setSelection(State#state.notebook,0),
@@ -343,8 +351,8 @@ check_page_title(Notebook) ->
get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
port_panel=Ports, ets_panel=Ets, timer_panel=Timers,
fun_panel=Funs, atom_panel=Atoms, dist_panel=Dist,
- mod_panel=Mods, mem_panel=Mem, int_panel=Int,
- sched_panel=Sched
+ mod_panel=Mods, mem_panel=Mem, persistent_panel=Persistent,
+ int_panel=Int, sched_panel=Sched
}) ->
Panel = case check_page_title(Notebook) of
?GEN_STR -> Gen;
@@ -358,6 +366,7 @@ get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
?DIST_STR -> Dist;
?MOD_STR -> Mods;
?MEM_STR -> Mem;
+ ?PERSISTENT_STR -> Persistent;
?INT_STR -> Int
end,
wx_object:get_pid(Panel).
@@ -365,7 +374,7 @@ get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
pid2panel(Pid, #state{gen_panel=Gen, pro_panel=Pro, port_panel=Ports,
ets_panel=Ets, timer_panel=Timers, fun_panel=Funs,
atom_panel=Atoms, dist_panel=Dist, mod_panel=Mods,
- mem_panel=Mem, int_panel=Int}) ->
+ mem_panel=Mem, persistent_panel=Persistent, int_panel=Int}) ->
case Pid of
Gen -> ?GEN_STR;
Pro -> ?PRO_STR;
@@ -377,6 +386,7 @@ pid2panel(Pid, #state{gen_panel=Gen, pro_panel=Pro, port_panel=Ports,
Dist -> ?DIST_STR;
Mods -> ?MOD_STR;
Mem -> ?MEM_STR;
+ ?PERSISTENT_STR -> Persistent;
Int -> ?INT_STR;
_ -> "unknown"
end.
diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl
index 14b086ff58..0627c15b1c 100644
--- a/lib/observer/src/crashdump_viewer.erl
+++ b/lib/observer/src/crashdump_viewer.erl
@@ -74,6 +74,7 @@
loaded_modules/0,
loaded_mod_details/1,
memory/0,
+ persistent_terms/0,
allocated_areas/0,
allocator_info/0,
hash_tables/0,
@@ -139,6 +140,7 @@
-define(node,node).
-define(not_connected,not_connected).
-define(old_instr_data,old_instr_data).
+-define(persistent_terms,persistent_terms).
-define(port,port).
-define(proc,proc).
-define(proc_dictionary,proc_dictionary).
@@ -293,6 +295,8 @@ loaded_mod_details(Mod) ->
call({loaded_mod_details,Mod}).
memory() ->
call(memory).
+persistent_terms() ->
+ call(persistent_terms).
allocated_areas() ->
call(allocated_areas).
allocator_info() ->
@@ -471,6 +475,11 @@ handle_call(memory,_From,State=#state{file=File}) ->
Memory=memory(File),
TW = truncated_warning([?memory]),
{reply,{ok,Memory,TW},State};
+handle_call(persistent_terms,_From,State=#state{file=File,dump_vsn=DumpVsn}) ->
+ TW = truncated_warning([?persistent_terms,?literals]),
+ DecodeOpts = get_decode_opts(DumpVsn),
+ Terms = persistent_terms(File, DecodeOpts),
+ {reply,{ok,Terms,TW},State};
handle_call(allocated_areas,_From,State=#state{file=File}) ->
AllocatedAreas=allocated_areas(File),
TW = truncated_warning([?allocated_areas]),
@@ -1444,15 +1453,7 @@ maybe_other_node2(Channel) ->
expand_memory(Fd,Pid,DumpVsn) ->
DecodeOpts = get_decode_opts(DumpVsn),
put(fd,Fd),
- Dict0 = case get(?literals) of
- undefined ->
- Literals = read_literals(Fd,DecodeOpts),
- put(?literals,Literals),
- put(fd,Fd),
- Literals;
- Literals ->
- Literals
- end,
+ Dict0 = get_literals(Fd,DecodeOpts),
Dict = read_heap(Fd,Pid,DecodeOpts,Dict0),
Expanded = {read_stack_dump(Fd,Pid,DecodeOpts,Dict),
read_messages(Fd,Pid,DecodeOpts,Dict),
@@ -1468,6 +1469,18 @@ expand_memory(Fd,Pid,DumpVsn) ->
end,
{Expanded,IncompleteWarning}.
+get_literals(Fd,DecodeOpts) ->
+ case get(?literals) of
+ undefined ->
+ OldFd = put(fd,Fd),
+ Literals = read_literals(Fd,DecodeOpts),
+ put(fd,OldFd),
+ put(?literals,Literals),
+ Literals;
+ Literals ->
+ Literals
+ end.
+
read_literals(Fd,DecodeOpts) ->
case lookup_index(?literals,[]) of
[{_,Start}] ->
@@ -1594,31 +1607,92 @@ read_heap(Fd,Pid,DecodeOpts,Dict0) ->
Dict0
end.
-read_heap(DecodeOpts,Dict0) ->
- %% This function is never called if the dump is truncated in {?proc_heap,Pid}
- case get(fd) of
- end_of_heap ->
+read_heap(DecodeOpts, Dict0) ->
+ %% This function is never called if the dump is truncated in
+ %% {?proc_heap,Pid}.
+ %%
+ %% It is not always possible to reconstruct the heap terms
+ %% in a single pass, especially if maps are involved.
+ %% See crashdump_helper:literal_map/0 for an example.
+ %%
+ %% Therefore, we need two passes. In the first pass
+ %% we collect all lines without parsing them, and in the
+ %% second pass we parse them.
+ %%
+ %% The first pass follows.
+
+ Lines0 = read_heap_lines(),
+
+ %% Save a map of all unprocessed lines so that deref_ptr() can
+ %% access any line when there are references to terms not yet
+ %% built.
+
+ LineMap = maps:from_list(Lines0),
+ put(line_map, LineMap),
+
+ %% Refc binaries (tag "Yc") must be processed before any sub
+ %% binaries (tag "Ys") referencing them, so we make sure to
+ %% process all the refc binaries first.
+ %%
+ %% The other lines can be processed in any order, but processing
+ %% them in the reverse order compared to how they are printed in
+ %% the crash dump seems to minimize the number of references to
+ %% terms that have not yet been built. That happens to be the
+ %% order of the line list as returned by read_heap_lines/0.
+
+ RefcBins = [Refc || {_,<<"Yc",_/binary>>}=Refc <- Lines0],
+ Lines = RefcBins ++ Lines0,
+
+ %% Second pass.
+
+ init_progress("Processing terms", map_size(LineMap)),
+ Dict = parse_heap_terms(Lines, DecodeOpts, Dict0),
+ erase(line_map),
+ end_progress(),
+ Dict.
+
+read_heap_lines() ->
+ read_heap_lines_1(get(fd), []).
+
+read_heap_lines_1(Fd, Acc) ->
+ case bytes(Fd) of
+ "=" ++ _next_tag ->
end_progress(),
- Dict0;
- Fd ->
- case bytes(Fd) of
- "=" ++ _next_tag ->
- end_progress(),
- put(fd, end_of_heap),
- Dict0;
- Line ->
- update_progress(length(Line)+1),
- Dict = parse(Line,DecodeOpts,Dict0),
- read_heap(DecodeOpts,Dict)
- end
+ put(fd, end_of_heap),
+ Acc;
+ Line0 ->
+ update_progress(length(Line0)+1),
+ {Addr,":"++Line1} = get_hex(Line0),
+
+ %% Reduce the memory consumption by converting the
+ %% line to a binary. Measurements show that it may also
+ %% be beneficial for performance, because it makes the
+ %% garbage collections cheaper.
+
+ Line = list_to_binary(Line1),
+ read_heap_lines_1(Fd, [{Addr,Line}|Acc])
end.
-parse(Line0, DecodeOpts, Dict0) ->
- {Addr,":"++Line1} = get_hex(Line0),
- {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0),
- [] = skip_blanks(Line),
+parse_heap_terms([{Addr,Line0}|T], DecodeOpts, Dict0) ->
+ case gb_trees:is_defined(Addr, Dict0) of
+ true ->
+ %% Already parsed (by a recursive call from do_deref_ptr()
+ %% to parse_line()). Nothing to do.
+ parse_heap_terms(T, DecodeOpts, Dict0);
+ false ->
+ %% Parse this previously unparsed term.
+ Dict = parse_line(Addr, Line0, DecodeOpts, Dict0),
+ parse_heap_terms(T, DecodeOpts, Dict)
+ end;
+parse_heap_terms([], _DecodeOpts, Dict) ->
Dict.
+parse_line(Addr, Line0, DecodeOpts, Dict0) ->
+ update_progress(1),
+ Line1 = binary_to_list(Line0),
+ {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0),
+ [] = skip_blanks(Line), %Assertion.
+ Dict.
%%-----------------------------------------------------------------
%% Page with one port
@@ -2142,6 +2216,56 @@ get_atom(Atom) when is_binary(Atom) ->
{Atom,nq}. % not quoted
%%-----------------------------------------------------------------
+%% Page with list of all persistent terms
+persistent_terms(File, DecodeOpts) ->
+ case lookup_index(?persistent_terms) of
+ [{_Id,Start}] ->
+ Fd = open(File),
+ pos_bof(Fd,Start),
+ Terms = get_persistent_terms(Fd),
+ Dict = get_literals(Fd,DecodeOpts),
+ parse_persistent_terms(Terms,DecodeOpts,Dict);
+ _ ->
+ []
+ end.
+
+parse_persistent_terms([[Name0,Val0]|Terms],DecodeOpts,Dict) ->
+ {Name,_,_} = parse_term(binary_to_list(Name0),DecodeOpts,Dict),
+ {Val,_,_} = parse_term(binary_to_list(Val0),DecodeOpts,Dict),
+ [{Name,Val}|parse_persistent_terms(Terms,DecodeOpts,Dict)];
+parse_persistent_terms([],_,_) -> [].
+
+get_persistent_terms(Fd) ->
+ case get_chunk(Fd) of
+ {ok,Bin} ->
+ get_persistent_terms(Fd,Bin,[]);
+ eof ->
+ []
+ end.
+
+
+%% Persistent terms are written one per line in the crash dump.
+get_persistent_terms(Fd,Bin,PersistentTerms) ->
+ Bins = binary:split(Bin,<<"\n">>,[global]),
+ get_persistent_terms1(Fd,Bins,PersistentTerms).
+
+get_persistent_terms1(_Fd,[<<"=",_/binary>>|_],PersistentTerms) ->
+ PersistentTerms;
+get_persistent_terms1(Fd,[LastBin],PersistentTerms) ->
+ case get_chunk(Fd) of
+ {ok,Bin0} ->
+ get_persistent_terms(Fd,<<LastBin/binary,Bin0/binary>>,PersistentTerms);
+ eof ->
+ [get_persistent_term(LastBin)|PersistentTerms]
+ end;
+get_persistent_terms1(Fd,[Bin|Bins],PersistentTerms) ->
+ get_persistent_terms1(Fd,Bins,[get_persistent_term(Bin)|PersistentTerms]).
+
+get_persistent_term(Bin) ->
+ binary:split(Bin,<<"|">>).
+
+
+%%-----------------------------------------------------------------
%% Page with memory information
memory(File) ->
case lookup_index(?memory) of
@@ -2871,16 +2995,18 @@ parse_atom_translation_table(N, Line0, As) ->
deref_ptr(Ptr, Line, DecodeOpts, D) ->
- Lookup = fun(D0) ->
- gb_trees:lookup(Ptr, D0)
- end,
+ Lookup0 = fun(D0) ->
+ gb_trees:lookup(Ptr, D0)
+ end,
+ Lookup = wrap_line_map(Ptr, Lookup0),
do_deref_ptr(Lookup, Line, DecodeOpts, D).
deref_bin(Binp0, Offset, Sz, Line, DecodeOpts, D) ->
Binp = Binp0 bor DecodeOpts#dec_opts.bin_addr_adj,
- Lookup = fun(D0) ->
- lookup_binary(Binp, Offset, Sz, D0)
- end,
+ Lookup0 = fun(D0) ->
+ lookup_binary(Binp, Offset, Sz, D0)
+ end,
+ Lookup = wrap_line_map(Binp, Lookup0),
do_deref_ptr(Lookup, Line, DecodeOpts, D).
lookup_binary(Binp, Offset, Sz, D) ->
@@ -2899,26 +3025,36 @@ lookup_binary(Binp, Offset, Sz, D) ->
end
end.
+wrap_line_map(Ptr, Lookup) ->
+ wrap_line_map_1(get(line_map), Ptr, Lookup).
+
+wrap_line_map_1(#{}=LineMap, Ptr, Lookup) ->
+ fun(D) ->
+ case Lookup(D) of
+ {value,_}=Res ->
+ Res;
+ none ->
+ case LineMap of
+ #{Ptr:=Line} ->
+ {line,Ptr,Line};
+ #{} ->
+ none
+ end
+ end
+ end;
+wrap_line_map_1(undefined, _Ptr, Lookup) ->
+ Lookup.
+
do_deref_ptr(Lookup, Line, DecodeOpts, D0) ->
case Lookup(D0) of
{value,Term} ->
{Term,Line,D0};
none ->
- case get(fd) of
- end_of_heap ->
- put(incomplete_heap,true),
- {['#CDVIncompleteHeap'],Line,D0};
- Fd ->
- case bytes(Fd) of
- "="++_ ->
- put(fd, end_of_heap),
- do_deref_ptr(Lookup, Line, DecodeOpts, D0);
- L ->
- update_progress(length(L)+1),
- D = parse(L, DecodeOpts, D0),
- do_deref_ptr(Lookup, Line, DecodeOpts, D)
- end
- end
+ put(incomplete_heap, true),
+ {['#CDVIncompleteHeap'],Line,D0};
+ {line,Addr,NewLine} ->
+ D = parse_line(Addr, NewLine, DecodeOpts, D0),
+ do_deref_ptr(Lookup, Line, DecodeOpts, D)
end.
get_hex(L) ->
@@ -3119,6 +3255,7 @@ tag_to_atom("literals") -> ?literals;
tag_to_atom("loaded_modules") -> ?loaded_modules;
tag_to_atom("memory") -> ?memory;
tag_to_atom("mod") -> ?mod;
+tag_to_atom("persistent_terms") -> ?persistent_terms;
tag_to_atom("no_distribution") -> ?no_distribution;
tag_to_atom("node") -> ?node;
tag_to_atom("not_connected") -> ?not_connected;
diff --git a/lib/observer/src/observer.app.src b/lib/observer/src/observer.app.src
index d73293a5f9..d48b846ad2 100644
--- a/lib/observer/src/observer.app.src
+++ b/lib/observer/src/observer.app.src
@@ -34,6 +34,7 @@
cdv_mem_cb,
cdv_mod_cb,
cdv_multi_wx,
+ cdv_persistent_cb,
cdv_port_cb,
cdv_proc_cb,
cdv_table_wx,
diff --git a/lib/observer/src/observer_html_lib.erl b/lib/observer/src/observer_html_lib.erl
index 0c4e32af49..c67fa28c6d 100644
--- a/lib/observer/src/observer_html_lib.erl
+++ b/lib/observer/src/observer_html_lib.erl
@@ -62,7 +62,8 @@ expandable_term_body(Heading,[],_Tab) ->
"Dictionary" -> "No dictionary was found";
"ProcState" -> "Information could not be retrieved,"
" system messages may not be handled by this process.";
- "SaslLog" -> "No log entry was found"
+ "SaslLog" -> "No log entry was found";
+ "Persistent Terms" -> "No persistent terms were found"
end];
expandable_term_body(Heading,Expanded,Tab) ->
Attr = "BORDER=0 CELLPADDING=0 CELLSPACING=1 WIDTH=100%",
diff --git a/lib/observer/test/crashdump_helper.erl b/lib/observer/test/crashdump_helper.erl
index 145ff56b71..576d112154 100644
--- a/lib/observer/test/crashdump_helper.erl
+++ b/lib/observer/test/crashdump_helper.erl
@@ -21,7 +21,9 @@
-module(crashdump_helper).
-export([n1_proc/2,remote_proc/2,
dump_maps/0,create_maps/0,
- create_binaries/0,create_sub_binaries/1]).
+ create_binaries/0,create_sub_binaries/1,
+ dump_persistent_terms/0,
+ create_persistent_terms/0]).
-compile(r18).
-include_lib("common_test/include/ct.hrl").
@@ -142,4 +144,47 @@ create_maps() ->
Map3 = lists:foldl(fun(I, A) ->
A#{I=>I*I}
end, Map2, lists:seq(-10, 0)),
- #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{}}.
+ #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{},literal=>literal_map()}.
+
+literal_map() ->
+ %% A literal map such as the one below will produce a heap dump
+ %% like this:
+ %%
+ %% Address1:t4:H<Address3>,H<Address4>,H<Address5>,H<Address6>
+ %% Address2:Mf4:H<Address1>:I1,I2,I3,I4
+ %% Address3: ... % "one"
+ %% Address4: ... % "two"
+ %% Address5: ... % "three"
+ %% Address6: ... % "four"
+ %%
+ %% The map cannot be reconstructed in a single sequential pass.
+ %%
+ %% To reconstruct the map, first the string keys "one"
+ %% through "four" must be reconstructed, then the tuple at
+ %% Address1, then the map at Address2.
+
+ #{"one"=>1,"two"=>2,"three"=>3,"four"=>4}.
+
+%%%
+%%% Test dumping of persistent terms (from OTP 21.2).
+%%%
+
+dump_persistent_terms() ->
+ Parent = self(),
+ F = fun() ->
+ register(aaaaaaaa_persistent_terms, self()),
+ put(pts, create_persistent_terms()),
+ Parent ! {self(),done},
+ receive _ -> ok end
+ end,
+ Pid = spawn_link(F),
+ receive
+ {Pid,done} ->
+ {ok,Pid}
+ end.
+
+create_persistent_terms() ->
+ persistent_term:put({?MODULE,first}, {pid,42.0}),
+ persistent_term:put({?MODULE,second}, [1,2,3]),
+ persistent_term:get().
+
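
create_persistent_terms/0 above relies on the persistent_term API introduced in OTP 21.2: put/2 stores a term globally and get/0 returns every persistent term on the node as a list of {Key, Value} pairs, which is what the crashdump SUITE later compares against the terms decoded from the dump. A small usage example (key and value made up):

    ok = persistent_term:put({myapp, limit}, 100),
    100 = persistent_term:get({myapp, limit}),
    true = lists:member({{myapp, limit}, 100}, persistent_term:get()).
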
diff --git a/lib/observer/test/crashdump_viewer_SUITE.erl b/lib/observer/test/crashdump_viewer_SUITE.erl
index 864454cdff..ed53c46a0d 100644
--- a/lib/observer/test/crashdump_viewer_SUITE.erl
+++ b/lib/observer/test/crashdump_viewer_SUITE.erl
@@ -345,6 +345,7 @@ browse_file(File) ->
{ok,_AllocINfo,_AllocInfoTW} = crashdump_viewer:allocator_info(),
{ok,_HashTabs,_HashTabsTW} = crashdump_viewer:hash_tables(),
{ok,_IndexTabs,_IndexTabsTW} = crashdump_viewer:index_tables(),
+ {ok,_PTs,_PTsTW} = crashdump_viewer:persistent_terms(),
io:format(" info read",[]),
@@ -595,6 +596,23 @@ special(File,Procs) ->
Maps = proplists:get_value(maps,Dict),
io:format(" maps ok",[]),
ok;
+ ".persistent_terms" ->
+ %% I registered a process as aaaaaaaa_persistent_terms in
+ %% the dump to make sure it will be the first in the list
+ %% when sorted on names.
+ [#proc{pid=Pid0,name=Name}|_Rest] = lists:keysort(#proc.name,Procs),
+ "aaaaaaaa_persistent_terms" = Name,
+ Pid = pid_to_list(Pid0),
+ {ok,ProcDetails=#proc{},[]} = crashdump_viewer:proc_details(Pid),
+ io:format(" process details ok",[]),
+
+ #proc{dict=Dict} = ProcDetails,
+ %% io:format("~p\n", [Dict]),
+ Pts1 = crashdump_helper:create_persistent_terms(),
+ Pts2 = proplists:get_value(pts,Dict),
+ true = lists:sort(Pts1) =:= lists:sort(Pts2),
+ io:format(" persistent terms ok",[]),
+ ok;
_ ->
ok
end,
@@ -679,9 +697,11 @@ do_create_dumps(DataDir,Rel) ->
CD5 = dump_with_size_limit_reached(DataDir,Rel,"trunc_bytes"),
CD6 = dump_with_unicode_atoms(DataDir,Rel,"unicode"),
CD7 = dump_with_maps(DataDir,Rel,"maps"),
+ CD8 = dump_with_persistent_terms(DataDir,Rel,"persistent_terms"),
TruncDumpMod = truncate_dump_mod(CD1),
TruncatedDumpsBinary = truncate_dump_binary(CD1),
- {[CD1,CD2,CD3,CD4,CD5,CD6,CD7,TruncDumpMod|TruncatedDumpsBinary],
+ {[CD1,CD2,CD3,CD4,CD5,CD6,CD7,CD8,
+ TruncDumpMod|TruncatedDumpsBinary],
DosDump};
_ ->
{[CD1,CD2], DosDump}
@@ -850,6 +870,16 @@ dump_with_maps(DataDir,Rel,DumpName) ->
?t:stop_node(n1),
CD.
+dump_with_persistent_terms(DataDir,Rel,DumpName) ->
+ Opt = rel_opt(Rel),
+ Pz = "-pz \"" ++ filename:dirname(code:which(?MODULE)) ++ "\"",
+ PzOpt = [{args,Pz}],
+ {ok,N1} = ?t:start_node(n1,peer,Opt ++ PzOpt),
+ {ok,_Pid} = rpc:call(N1,crashdump_helper,dump_persistent_terms,[]),
+ CD = dump(N1,DataDir,Rel,DumpName),
+ ?t:stop_node(n1),
+ CD.
+
dump(Node,DataDir,Rel,DumpName) ->
Crashdump = filename:join(DataDir, dump_prefix(Rel)++DumpName),
rpc:call(Node,os,putenv,["ERL_CRASH_DUMP",Crashdump]),
diff --git a/lib/os_mon/c_src/cpu_sup.c b/lib/os_mon/c_src/cpu_sup.c
index 17ef48c26e..c96a5c9f7c 100644
--- a/lib/os_mon/c_src/cpu_sup.c
+++ b/lib/os_mon/c_src/cpu_sup.c
@@ -152,6 +152,8 @@ static void util_measure(unsigned int **result_vec, int *result_sz);
#if defined(__sun__)
static unsigned int misc_measure(char* name);
+#elif defined(__linux__)
+static unsigned int misc_measure(char cmd);
#endif
static void sendi(unsigned int data);
static void sendv(unsigned int data[], int ints);
@@ -231,6 +233,11 @@ int main(int argc, char** argv) {
case AVG1: sendi(misc_measure("avenrun_1min")); break;
case AVG5: sendi(misc_measure("avenrun_5min")); break;
case AVG15: sendi(misc_measure("avenrun_15min")); break;
+#elif defined(__linux__)
+ case NPROCS:
+ case AVG1:
+ case AVG5:
+ case AVG15: sendi(misc_measure(cmd)); break;
#elif defined(__OpenBSD__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || defined(__DragonFly__)
case NPROCS: bsd_count_procs(); break;
case AVG1: bsd_loadavg(0); break;
@@ -238,7 +245,7 @@ int main(int argc, char** argv) {
case AVG15: bsd_loadavg(2); break;
#endif
#if defined(__sun__) || defined(__linux__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__)
- case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break;
+ case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break;
#endif
case QUIT: free((void*)rv); return 0;
default: error("Bad command"); break;
@@ -329,6 +336,22 @@ static void bsd_count_procs(void) {
#if defined(__linux__)
+static unsigned int misc_measure(char cmd) {
+ struct sysinfo info;
+
+ if (sysinfo(&info))
+ error(strerror(errno));
+
+ switch (cmd) {
+ case AVG1: return (unsigned int)(info.loads[0] / 256);
+ case AVG5: return (unsigned int)(info.loads[1] / 256);
+ case AVG15: return (unsigned int)(info.loads[2] / 256);
+ case NPROCS: return info.procs;
+ }
+
+ return -1;
+}
+
static cpu_t *read_procstat(FILE *fp, cpu_t *cpu) {
char buffer[BUFFERSIZE];
@@ -357,8 +380,24 @@ static void util_measure(unsigned int **result_vec, int *result_sz) {
FILE *fp;
unsigned int *rv = NULL;
cpu_t cpu;
-
+
+ rv = *result_vec;
+ rv[0] = no_of_cpus;
+
if ( (fp = fopen(PROCSTAT,"r")) == NULL) {
+ if (errno == EACCES) { /* SELinux */
+ rv[1] = 1; /* just the cpu id */
+ ++rv; /* first value is number of cpus */
+ ++rv; /* second value is number of entries */
+ for (i = 0; i < no_of_cpus; ++i) {
+ rv[0] = CU_CPU_ID;
+ rv[1] = i;
+ rv += 1*2;
+ }
+ *result_sz = 2 + 2*1 * no_of_cpus;
+ return;
+ }
+
/* Check if procfs is mounted,
* otherwise:
* try and try again, bad procsfs.
@@ -367,20 +406,19 @@ static void util_measure(unsigned int **result_vec, int *result_sz) {
return;
}
- /*ignore read*/
+ /*ignore read*/
if (fgets(buffer, BUFFERSIZE, fp) == NULL) {
*result_sz = 0;
return;
}
- rv = *result_vec;
- rv[0] = no_of_cpus;
+
rv[1] = CU_VALUES;
++rv; /* first value is number of cpus */
++rv; /* second value is number of entries */
for (i = 0; i < no_of_cpus; ++i) {
read_procstat(fp, &cpu);
-
+
rv[ 0] = CU_CPU_ID; rv[ 1] = cpu.id;
rv[ 2] = CU_USER; rv[ 3] = cpu.user;
rv[ 4] = CU_NICE_USER; rv[ 5] = cpu.nice_user;
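
The new Linux misc_measure() takes the load averages from sysinfo(2). On Linux the loads[] fields are fixed-point values scaled by 65536 (2^16), while cpu_sup reports load averages scaled by 256, so the division by 256 converts one representation into the other. A worked example, with an assumed load value:

    1.50 * 65536 = 98304          (value seen in sysinfo loads[0])
    98304 / 256  = 384 = 1.50*256 (value cpu_sup:avg1() is documented to return)
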
diff --git a/lib/os_mon/src/cpu_sup.erl b/lib/os_mon/src/cpu_sup.erl
index 81e049ef22..ba2d89313e 100644
--- a/lib/os_mon/src/cpu_sup.erl
+++ b/lib/os_mon/src/cpu_sup.erl
@@ -220,17 +220,21 @@ code_change(_OldVsn, State, _Extra) ->
%% internal functions
%%----------------------------------------------------------------------
-get_uint32_measurement(Request, #internal{os_type = {unix, linux}}) ->
- {ok,F} = file:open("/proc/loadavg",[read,raw]),
- {ok,D} = file:read_line(F),
- ok = file:close(F),
- {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D),
- case Request of
- ?avg1 -> sunify(Load1);
- ?avg5 -> sunify(Load5);
- ?avg15 -> sunify(Load15);
- ?ping -> 4711;
- ?nprocs -> PTotal
+get_uint32_measurement(Request, #internal{port = P, os_type = {unix, linux}}) ->
+ case file:open("/proc/loadavg",[read,raw]) of
+ {ok,F} ->
+ {ok,D} = file:read_line(F),
+ ok = file:close(F),
+ {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D),
+ case Request of
+ ?avg1 -> sunify(Load1);
+ ?avg5 -> sunify(Load5);
+ ?avg15 -> sunify(Load15);
+ ?ping -> 4711;
+ ?nprocs -> PTotal
+ end;
+ {error,_} ->
+ port_server_call(P, Request)
end;
get_uint32_measurement(Request, #internal{port = P, os_type = {unix, Sys}}) when
Sys == sunos;
diff --git a/lib/public_key/asn1/OTP-PKIX.asn1 b/lib/public_key/asn1/OTP-PKIX.asn1
index 37196bb9bf..9bcd99fba3 100644
--- a/lib/public_key/asn1/OTP-PKIX.asn1
+++ b/lib/public_key/asn1/OTP-PKIX.asn1
@@ -326,8 +326,13 @@ PublicKeyAlgorithm ::= SEQUENCE {
OPTIONAL }
SupportedSignatureAlgorithms SIGNATURE-ALGORITHM-CLASS ::= {
- dsa-with-sha1 | dsaWithSHA1 | md2-with-rsa-encryption |
- md5-with-rsa-encryption | sha1-with-rsa-encryption | sha-1with-rsa-encryption |
+ dsa-with-sha1 | dsaWithSHA1 |
+ dsa-with-sha224 |
+ dsa-with-sha256 |
+ md2-with-rsa-encryption |
+ md5-with-rsa-encryption |
+ sha1-with-rsa-encryption |
+ sha-1with-rsa-encryption |
sha224-with-rsa-encryption |
sha256-with-rsa-encryption |
sha384-with-rsa-encryption |
@@ -368,6 +373,21 @@ SupportedPublicKeyAlgorithms PUBLIC-KEY-ALGORITHM-CLASS ::= {
ID id-dsaWithSHA1
TYPE DSAParams }
+ dsa-with-sha224 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha224
+ TYPE DSAParams }
+
+ dsa-with-sha256 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha256
+ TYPE DSAParams }
+
+ id-dsa-with-sha224 OBJECT IDENTIFIER ::= {
+ joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+ csor(3) algorithms(4) id-dsa-with-sha2(3) 1 }
+
+ id-dsa-with-sha256 OBJECT IDENTIFIER ::= {
+ joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+ csor(3) algorithms(4) id-dsa-with-sha2(3) 2 }
--
-- RSA Keys and Signatures
--
diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl
index 3f609ce6c6..dca1e0766e 100644
--- a/lib/public_key/src/public_key.erl
+++ b/lib/public_key/src/public_key.erl
@@ -609,6 +609,10 @@ pkix_sign_types(?'id-dsa-with-sha1') ->
{sha, dsa};
pkix_sign_types(?'id-dsaWithSHA1') ->
{sha, dsa};
+pkix_sign_types(?'id-dsa-with-sha224') ->
+ {sha224, dsa};
+pkix_sign_types(?'id-dsa-with-sha256') ->
+ {sha256, dsa};
pkix_sign_types(?'ecdsa-with-SHA1') ->
{sha, ecdsa};
pkix_sign_types(?'ecdsa-with-SHA256') ->
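
The two new clauses map the DSA-with-SHA-2 OIDs defined in OTP-PKIX.asn1 above to {Hash, dsa} pairs. With the OID values taken from those ASN.1 definitions (2.16.840.1.101.3.4.3.1 and .2), a usage example:

    %% The ?'id-dsa-with-sha224' / ?'id-dsa-with-sha256' macros expand to
    %% these OID tuples (see the ASN.1 hunk above).
    {sha224, dsa} = public_key:pkix_sign_types({2,16,840,1,101,3,4,3,1}),
    {sha256, dsa} = public_key:pkix_sign_types({2,16,840,1,101,3,4,3,2}).
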
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index 1955e9e119..878489eb0f 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -44,7 +44,9 @@ all() ->
encrypt_decrypt,
{group, sign_verify},
pkix, pkix_countryname, pkix_emailaddress, pkix_path_validation,
- pkix_iso_rsa_oid, pkix_iso_dsa_oid, pkix_crl, general_name,
+ pkix_iso_rsa_oid, pkix_iso_dsa_oid,
+ pkix_dsa_sha2_oid,
+ pkix_crl, general_name,
pkix_verify_hostname_cn,
pkix_verify_hostname_subjAltName,
pkix_verify_hostname_subjAltName_IP,
@@ -1114,6 +1116,13 @@ pkix_iso_dsa_oid(Config) when is_list(Config) ->
{_, dsa} = public_key:pkix_sign_types(SigAlg#'SignatureAlgorithm'.algorithm).
%%--------------------------------------------------------------------
+pkix_dsa_sha2_oid() ->
+ [{doc, "Test support dsa_sha2 oid"}].
+pkix_dsa_sha2_oid(Config) when is_list(Config) ->
+ {sha224, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha224'),
+ {sha256, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha256').
+
+%%--------------------------------------------------------------------
pkix_crl() ->
[{doc, "test pkix_crl_* functions"}].
diff --git a/lib/sasl/src/sasl.app.src b/lib/sasl/src/sasl.app.src
index 688aff16f1..5d45af0b50 100644
--- a/lib/sasl/src/sasl.app.src
+++ b/lib/sasl/src/sasl.app.src
@@ -43,5 +43,5 @@
{env, []},
{mod, {sasl, []}},
{runtime_dependencies, ["tools-2.6.14","stdlib-3.4","kernel-5.3",
- "erts-9.0"]}]}.
+ "erts-@OTP-13468@"]}]}.
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index 6916107623..f085246924 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1562,10 +1562,10 @@ mandatory_modules() ->
preloaded() ->
%% Sorted
- [erl_prim_loader,erl_tracer,erlang,
+ [atomics, counters, erl_prim_loader,erl_tracer,erlang,
erts_code_purger,erts_dirty_process_signal_handler,
erts_internal,erts_literal_area_collector,
- init,otp_ring0,prim_buffer,prim_eval,prim_file,
+ init,otp_ring0,persistent_term,prim_buffer,prim_eval,prim_file,
prim_inet,prim_zip,zlib].
%%______________________________________________________________________
diff --git a/lib/ssh/doc/src/Makefile b/lib/ssh/doc/src/Makefile
index 77fa356092..4e32dd9976 100644
--- a/lib/ssh/doc/src/Makefile
+++ b/lib/ssh/doc/src/Makefile
@@ -45,6 +45,7 @@ XML_REF3_FILES = \
ssh_connection.xml \
ssh_server_channel.xml \
ssh_server_key_api.xml \
+ ssh_file.xml \
ssh_sftp.xml \
ssh_sftpd.xml \
@@ -56,8 +57,8 @@ XML_CHAPTER_FILES = \
notes.xml \
introduction.xml \
using_ssh.xml \
+ terminology.xml \
configure_algos.xml
-# ssh_protocol.xml \
BOOK_FILES = book.xml
diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml
index 7e77c6a457..42bdf667f8 100644
--- a/lib/ssh/doc/src/notes.xml
+++ b/lib/ssh/doc/src/notes.xml
@@ -230,6 +230,22 @@
</section>
</section>
+<section><title>Ssh 4.6.9.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Incompatibility with newer OpenSSH fixed. Previously
+ versions 7.8 and later could cause Erlang SSH to exit.</p>
+ <p>
+ Own Id: OTP-15413</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Ssh 4.6.9.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
@@ -3869,4 +3885,3 @@
</section>
</chapter>
-
diff --git a/lib/ssh/doc/src/ref_man.xml b/lib/ssh/doc/src/ref_man.xml
index df37b0244f..60572b985b 100644
--- a/lib/ssh/doc/src/ref_man.xml
+++ b/lib/ssh/doc/src/ref_man.xml
@@ -40,6 +40,7 @@
<xi:include href="ssh_connection.xml"/>
<xi:include href="ssh_client_key_api.xml"/>
<xi:include href="ssh_server_key_api.xml"/>
+ <xi:include href="ssh_file.xml"/>
<xi:include href="ssh_sftp.xml"/>
<xi:include href="ssh_sftpd.xml"/>
</application>
diff --git a/lib/ssh/doc/src/specs.xml b/lib/ssh/doc/src/specs.xml
index acdbe2ddfd..a6517f3660 100644
--- a/lib/ssh/doc/src/specs.xml
+++ b/lib/ssh/doc/src/specs.xml
@@ -6,6 +6,7 @@
<xi:include href="../specs/specs_ssh_connection.xml"/>
<xi:include href="../specs/specs_ssh_server_channel.xml"/>
<xi:include href="../specs/specs_ssh_server_key_api.xml"/>
+ <xi:include href="../specs/specs_ssh_file.xml"/>
<xi:include href="../specs/specs_ssh_sftp.xml"/>
<xi:include href="../specs/specs_ssh_sftpd.xml"/>
</specs>
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index f238bf2ca8..8435fced11 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -99,8 +99,8 @@
</p>
<p>The paths could easily be changed by options:
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> and
- <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
</p>
<p>A completely different storage could be interfaced by writing call-back modules
using the behaviours
@@ -123,12 +123,12 @@
<item><c>ssh_host_ecdsa_key</c> and <c>ssh_host_ecdsa_key.pub</c></item>
</list>
<p>The host keys directory could be changed with the option
- <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p>
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p>
</item>
<item>Optional: one or more <i>User's public key</i> in case of <c>publickey</c> authorization.
Default is to store them concatenated in the file <c>.ssh/authorized_keys</c> in the user's home directory.
<p>The user keys directory could be changed with the option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.</p>
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>.</p>
</item>
</list>
</section>
@@ -138,7 +138,7 @@
<p>The keys and some other data are by default stored in files in the directory <c>.ssh</c>
in the user's home directory.</p>
<p>The directory could be changed with the option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>.
</p>
<list>
<item>Optional: a list of <i>Host public key(s)</i> for previously connected hosts. This list
@@ -183,31 +183,6 @@
</datatype>
<datatype>
- <name name="pref_public_key_algs_client_option"/>
- <desc>
- <p>List of user (client) public key algorithms to try to use.</p>
- <p>The default value is the <c>public_key</c> entry in the list returned by
- <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
- </p>
- <p>If there is no public key of a specified type available, the corresponding entry is ignored.
- Note that the available set is dependent on the underlying cryptolib and current user's public keys.
- </p>
- <p>See also the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
- for specifying the path to the user's keys.
- </p>
- </desc>
- </datatype>
-
- <datatype>
- <name name="pubkey_passphrase_client_options"/>
- <desc>
- <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
- supplied with thoose options.
- </p>
- </desc>
- </datatype>
-
- <datatype>
<name name="host_accepting_client_options"/>
<name name="accept_hosts"/>
<name name="fp_digest_alg"/>
@@ -220,7 +195,7 @@
<p>This option guides the <c>connect</c> function on how to act when the connected server presents a Host
Key that the client has not seen before. The default is to ask the user with a question on stdio of whether to
accept or reject the new Host Key.
- See the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
+ See the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
for specifying the path to the file <c>known_hosts</c> where previously accepted Host Keys are recorded.
See also the option
<seealso marker="#type-key_cb_common_option">key_cb</seealso>
@@ -276,7 +251,7 @@
accept question the next time the same host is connected. If the option
<seealso marker="#type-key_cb_common_option"><c>key_cb</c></seealso>
is not present, the key is saved in the file "known_hosts". See option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> for
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> for
the location of that file.
</p>
<p>If <c>false</c>, the key is not saved and the key will still be unknown
@@ -406,9 +381,20 @@
<datatype>
<name name="exec_daemon_option"/>
+ <name name="exec_spec"/>
+ <desc/>
+ </datatype>
+ <datatype>
+ <name name="exec_fun"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="'exec_fun/1'"/>
<name name="'exec_fun/2'"/>
<name name="'exec_fun/3'"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="exec_result"/>
<desc>
<p>This option changes how the daemon execute exec-requests from clients. The term in the return value
@@ -478,18 +464,6 @@
<name name="pwdfun_4"/>
<desc>
<taglist>
- <tag><marker id="type-system_dir_daemon_option"/><c>system_dir</c></tag>
- <item>
- <p>Sets the system directory, containing the host key files
- that identify the host keys for <c>ssh</c>. Defaults to
- <c>/etc/ssh</c>.</p>
- <p>For security reasons, this directory is normally accessible only to the root user.</p>
- <p>See also the option
- <seealso marker="#type-key_cb_common_option">key_cb</seealso>
- for the general way to handle keys.
- </p>
- </item>
-
<tag><c>auth_method_kb_interactive_data</c></tag>
<item>
<p>Sets the text strings that the daemon sends to the client for presentation to the user when
@@ -502,7 +476,7 @@
</p>
</item>
- <tag><c>user_passwords</c></tag>
+ <tag><marker id="option-user_passwords"/><c>user_passwords</c></tag>
<item>
<p>Provides passwords for password authentication. The passwords are used when someone tries
to connect to the server and public key user-authentication fails. The option provides
@@ -510,7 +484,7 @@
</p>
</item>
- <tag><c>password</c></tag>
+ <tag><marker id="option-password"/><c>password</c></tag>
<item>
<p>Provides a global password that authenticates any user.</p>
<warning>
@@ -519,7 +493,9 @@
</warning>
</item>
- <tag><c>pwdfun</c> with <c>pwdfun_4()</c></tag>
+ <tag><marker id="option-pwdfun"/><c>pwdfun</c> with
+ <seealso marker="#type-pwdfun_4"><c>pwdfun_4()</c></seealso>
+ </tag>
<item>
<p>Provides a function for password validation. This could be used for calling an external system or handling
passwords stored as hash values.
@@ -546,7 +522,9 @@
can be used for this. The return value <c>disconnect</c> is useful for this.</p>
</item>
- <tag><c>pwdfun</c> with <c>pwdfun_2()</c></tag>
+ <tag><c>pwdfun</c> with
+ <seealso marker="#type-pwdfun_2"><c>pwdfun_2()</c></seealso>
+ </tag>
<item>
<p>Provides a function for password validation. This function is called with user and password
as strings, and returns:</p>
@@ -725,21 +703,6 @@
</datatype>
<datatype>
- <name name="user_dir_common_option"/>
- <desc>
- <p>Sets the user directory. That is, the directory containing <c>ssh</c> configuration
- files for the user, such as
- <c>known_hosts</c>, <c>id_rsa</c>, <c>id_dsa</c>>, <c>id_ecdsa</c> and <c>authorized_key</c>.
- Defaults to the directory normally referred to as <c>~/.ssh</c>.
- </p>
- <p>See also the option
- <seealso marker="#type-key_cb_common_option">key_cb</seealso>
- for the general way to handle keys.
- </p>
- </desc>
- </datatype>
-
- <datatype>
<name name="profile_common_option"/>
<desc>
<p>Used together with <c>ip-address</c> and <c>port</c> to
@@ -795,7 +758,8 @@
</p>
<p>The <c>Opts</c> defaults to <c>[]</c> when only the <c>Module</c> is specified.
</p>
- <p>The default value of this option is <c>{ssh_file, []}</c>.
+ <p>The default value of this option is <c>{ssh_file, []}</c>. See also the manpage of
+ <seealso marker="ssh:ssh_file">ssh_file</seealso>.
</p>
<p>A call to the call-back function <c>F</c> will be</p>
<code>
@@ -804,13 +768,32 @@
<p>where <c>...</c> are arguments to <c>F</c> as in
<seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and/or
<seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
- The <c>UserOptions</c> are the options given to <c>ssh:connect</c>, <c>ssh:shell</c> or <c>ssh:daemon</c>.
+ The <c>UserOptions</c> are the options given to
+ <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>,
+ <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso> or
+ <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso>.
</p>
</desc>
</datatype>
<datatype>
+ <name name="pref_public_key_algs_common_option"/>
+ <desc>
+ <p>List of user (client) public key algorithms to try to use.</p>
+ <p>The default value is the <c>public_key</c> entry in the list returned by
+ <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
+ </p>
+ <p>If there is no public key of a specified type available, the corresponding entry is ignored.
+ Note that the available set is dependent on the underlying cryptolib and current user's public keys.
+ </p>
+ <p>See also the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ for specifying the path to the user's keys.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="disconnectfun_common_option"/>
<desc>
<p>Provides a fun to implement your own logging when the peer disconnects.</p>
diff --git a/lib/ssh/doc/src/ssh_app.xml b/lib/ssh/doc/src/ssh_app.xml
index e80bb1853d..eb804e67dc 100644
--- a/lib/ssh/doc/src/ssh_app.xml
+++ b/lib/ssh/doc/src/ssh_app.xml
@@ -74,13 +74,18 @@
<c>id_ecdsa_key</c>,
<c>known_hosts</c>, and <c>authorized_keys</c> in ~/.ssh,
and for the host key files in <c>/etc/ssh</c>. These locations can be changed
- by the options <c>user_dir</c> and <c>system_dir</c>.
+ by the options
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
</p>
<p>Public key handling can also be customized through a callback module that
implements the behaviors
<seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and
<seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
</p>
+ <p>See also the default callback module documentation in
+ <seealso marker="ssh_file">ssh_file</seealso>.
+ </p>
</section>
<section>
diff --git a/lib/ssh/doc/src/ssh_file.xml b/lib/ssh/doc/src/ssh_file.xml
new file mode 100644
index 0000000000..ae6ba2e1d9
--- /dev/null
+++ b/lib/ssh/doc/src/ssh_file.xml
@@ -0,0 +1,275 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ssh_file</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <module>ssh_file</module>
+ <modulesummary>Default callback module for the client's and server's database operations in the ssh application</modulesummary>
+ <description>
+ <p>This module is the default callback handler for the client's and the server's user and host "database" operations.
+ All data, for instance key pairs, are stored in files in the normal file system. This page documents the files, where they
+ are stored and configuration options for this callback module.
+ </p>
+ <p>The intention is to be compatible with the
+ <url href="http://www.openssh.com">OpenSSH</url>
+ storage in files. Therefore it mimics directories and filenames of
+ <url href="http://www.openssh.com">OpenSSH</url>.
+ </p>
+
+ <p>Ssh_file implements the <seealso marker="ssh:ssh_server_key_api">ssh_server_key_api</seealso> and
+ the <seealso marker="ssh:ssh_client_key_api">ssh_client_key_api</seealso>.
+ This enables the user to write their own interface, for example using a database handler.
+ </p>
+ <p>Such an alternative callback module could be used by setting the option
+ <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso>
+ when starting a client or a server (with for example
+ <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>,
+ <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso> of
+ <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso>
+ ).
+ </p>
+
+ <note>
+ <p>The functions are <i>Callbacks</i> for the SSH app. They are not intended to be called from the user's code!
+ </p>
+ </note>
+ </description>
+
+ <section>
+ <title>Files, directories and who uses them</title>
+ <section>
+ <title>Daemons</title>
+ <p>Daemons use all files stored in the <seealso marker="#SYSDIR">SYSDIR</seealso> directory.
+ </p>
+ <p>Optionally, in case of <c>publickey</c> authorization, one or more of the remote user's public keys
+ in the <seealso marker="#USERDIR">USERDIR</seealso> directory are used.
+ See the files
+ <seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso> and
+ <seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso>.
+ </p>
+ </section>
+
+ <section>
+ <title>Clients</title>
+ <p>Clients use all files stored in the <seealso marker="#USERDIR">USERDIR</seealso> directory.
+ </p>
+ </section>
+
+ <section>
+ <title>Directory contents</title>
+ <taglist>
+ <tag><marker id="LOCALUSER"/>LOCALUSER</tag>
+ <item><p>The user name of the OS process running the Erlang virtual machine (emulator).</p>
+ </item>
+
+ <tag><marker id="SYSDIR"/>SYSDIR</tag>
+ <item><p>This is the directory holding the server's files:</p>
+ <list>
+ <item><marker id="SYSDIR-ssh_host_dsa_key"/><c>ssh_host_dsa_key</c> - private dss host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_rsa_key"/><c>ssh_host_rsa_key</c> - private rsa host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_ecdsa_key"/><c>ssh_host_ecdsa_key</c> - private ecdsa host key (optional)</item>
+ </list>
+ <p>At least one host key must be defined. The default value of SYSDIR is <marker id="#/etc/ssh"/><c>/etc/ssh</c>.
+ </p>
+ <p>For security reasons, this directory is normally accessible only to the root user.
+ </p>
+ <p>To change the SYSDIR, see the <seealso marker="#type-system_dir_daemon_option">system_dir</seealso> option.
+ </p>
+ </item>
+
+ <tag><marker id="USERDIR"/>USERDIR</tag>
+ <item><p>This is the directory holding the files:</p>
+ <list>
+ <item><marker id="USERDIR-authorized_keys"/><c>authorized_keys</c>
+ and, as a second alternative,
+ <marker id="USERDIR-authorized_keys2"/><c>authorized_keys2</c> -
+ the user's public keys are stored concatenated in one of those files.
+ </item>
+ <item><marker id="USERDIR-known_hosts"/><c>known_hosts</c> - host keys from hosts visited
+ concatenated. The file is created and used by the client.</item>
+ <item><marker id="USERDIR-id_dsa"/><c>id_dsa</c> - private dss user key (optional)</item>
+ <item><marker id="USERDIR-id_rsa"/><c>id_rsa</c> - private rsa user key (optional)</item>
+ <item><marker id="USERDIR-id_ecdsa"/><c>id_ecdsa</c> - private ecdsa user key (optional)</item>
+ </list>
+ <p>The default value of USERDIR is <c>/home/</c><seealso marker="#LOCALUSER"><c>LOCALUSER</c></seealso><c>/.ssh</c>.
+ </p>
+ <p>To change the USERDIR, see the <seealso marker="#type-user_dir_common_option">user_dir</seealso> option.
+ </p>
+ </item>
+ </taglist>
+ </section>
+ </section>
+
+ <datatypes>
+ <datatype_title>Options for the default ssh_file callback module</datatype_title>
+ <datatype>
+ <name name="user_dir_common_option"/>
+ <desc>
+ <p>Sets the <seealso marker="#USERDIR">user directory</seealso>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="user_dir_fun_common_option"/>
+ <name name="user2dir"/>
+ <desc>
+ <p>Sets the <seealso marker="#USERDIR">user directory</seealso> dynamically
+ by evaluating the <c>user2dir</c> function.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="system_dir_daemon_option"/>
+ <desc>
+ <p>Sets the <seealso marker="#SYSDIR">system directory</seealso>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="pubkey_passphrase_client_options"/>
+ <desc>
+ <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
+ supplied with those options.
+ </p>
+ </desc>
+ </datatype>
+
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name>host_key(Algorithm, DaemonOptions) -> {ok, Key} | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the API description in
+ <seealso marker="ssh:ssh_server_key_api#Module:host_key-2">ssh_server_key_api, Module:host_key/2</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-system_dir_daemon_option">system_dir</seealso></item>
+ <!-- item>dsa_pass_phrase</item -->
+ <!-- item>rsa_pass_phrase</item -->
+ <!-- item>ecdsa_pass_phrase</item -->
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#SYSDIR-ssh_host_rsa_key"><c>SYSDIR/ssh_host_rsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_dsa_key"><c>SYSDIR/ssh_host_dsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_ecdsa_key"><c>SYSDIR/ssh_host_ecdsa_key</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>is_auth_key(PublicUserKey, User, DaemonOptions) -> Result</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the API description in
+ <seealso marker="ssh:ssh_server_key_api#Module:is_auth_key-3">ssh_server_key_api: Module:is_auth_key/3</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_fun_common_option">user_dir_fun</seealso></item>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso></item>
+ <item><seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>add_host_key(HostNames, PublicHostKey, ConnectOptions) -> ok | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the API description in
+ <seealso marker="ssh:ssh_client_key_api#Module:add_host_key-3">ssh_client_key_api, Module:add_host_key/3</seealso>.
+ </p>
+ <p><strong>Option</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>File</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>is_host_key(Key, Host, Algorithm, ConnectOptions) -> Result</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the API description in
+ <seealso marker="ssh:ssh_client_key_api#Module:is_host_key-4">ssh_client_key_api, Module:is_host_key/4</seealso>.
+ </p>
+ <p><strong>Option</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>File</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>user_key(Algorithm, ConnectOptions) -> {ok, PrivateKey} | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the API description in
+ <seealso marker="ssh:ssh_client_key_api#Module:user_key-2">ssh_client_key_api, Module:user_key/2</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">dsa_pass_phrase</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">rsa_pass_phrase</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">ecdsa_pass_phrase</seealso></item>
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-id_dsa"><c>USERDIR/id_dsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_rsa"><c>USERDIR/id_rsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_ecdsa"><c>USERDIR/id_ecdsa</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
diff --git a/lib/ssh/doc/src/ssh_sftp.xml b/lib/ssh/doc/src/ssh_sftp.xml
index ea55126cb3..8c105147d6 100644
--- a/lib/ssh/doc/src/ssh_sftp.xml
+++ b/lib/ssh/doc/src/ssh_sftp.xml
@@ -425,7 +425,6 @@
<type>
<v>ChannelPid = pid()</v>
<v>Handle = term()</v>
- <v>Position = integer()</v>
<v>Len = integer()</v>
<v>Timeout = timeout()</v>
<v>Data = string() | binary()</v>
diff --git a/lib/ssh/doc/src/terminology.xml b/lib/ssh/doc/src/terminology.xml
new file mode 100644
index 0000000000..db1e08970d
--- /dev/null
+++ b/lib/ssh/doc/src/terminology.xml
@@ -0,0 +1,185 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Terminology</title>
+ <prepared></prepared>
+ <docno></docno>
+ <approved></approved>
+ <date></date>
+ <rev></rev>
+ <file>terminology.xml</file>
+ </header>
+
+ <section>
+ <title>General Information</title>
+    <p>In the following, terms that may cause confusion are explained.
+ </p>
+ </section>
+
+ <section>
+ <title>The term "user"</title>
+ <p>A "user" is a term that everyone understands intuitively. However, the understandings may differ which can
+ cause confusion.
+ </p>
+ <p>The term is used differently in <url href="http://www.openssh.com">OpenSSH</url> and SSH in Erlang/OTP.
+    The reason is the different environments and use cases, which are not immediately obvious.
+ </p>
+ <p>This chapter aims at explaining the differences and giving a rationale for why Erlang/OTP handles "user" as
+ it does.
+ </p>
+
+ <section>
+ <title>In OpenSSH</title>
+      <p>Many have used the command 'ssh' on a Linux machine (or similar) to remotely log in to
+ another machine. One types
+ </p>
+ <code>ssh host</code>
+      <p>to log in to the machine named <c>host</c>. The command prompts for your password on the remote <c>host</c> and
+      then you can read, write and execute according to the rights your <i>user name</i> has on the remote <c>host</c>. There are
+      stronger variants with pre-distributed keys or certificates, but those are for now just details of the
+      authentication process.
+ </p>
+ <p>You could log in as the user <c>anotheruser</c> with
+ </p>
+ <code>ssh anotheruser@host</code>
+      <p>and you will then be able to act as <c>anotheruser</c> on the <c>host</c> if correctly authorized.
+ </p>
+      <p>So what does <i>"your user name has rights"</i> mean? In a UNIX/Linux/etc context it means exactly that:
+      the <i>user</i> can read, write and execute programs according to the OS rules.
+ In addition, the user has a home directory (<c>$HOME</c>) and there is a <c>$HOME/.ssh/</c> directory
+ with ssh-specific files.
+ </p>
+ <section>
+ <title>SSH password authentication</title>
+ <p>When SSH tries to log in to a host, the ssh protocol communicates the user name (as a string) and a password.
+ The remote ssh server checks that there is such a user defined and that the provided password is acceptable.
+ </p>
+ <p>If so, the user is authorized.
+ </p>
+ </section>
+ <section>
+ <title>SSH public key authentication</title>
+ <p>This is a stronger method where the ssh protocol brings the user name, the user's public key and some
+ cryptographic information which we could ignore here.
+ </p>
+ <p>The ssh server on the remote host checks:
+ </p>
+ <list>
+ <item>That the <i>user</i> has a home directory,</item>
+	  <item>that the home directory contains a .ssh/ directory, and</item>
+	  <item>that the .ssh/ directory's <c>authorized_keys</c> file contains the public key just received.</item>
+ </list>
+	<p>If so, the user is authorized.
+ </p>
+ </section>
+ <section>
+	<title>The SSH server on UNIX/Linux/etc after a successful authentication</title>
+	<p>After a successful incoming authentication, a new process runs as the just authenticated user.</p>
+	<p>The next step is to start a service according to the ssh request. In case of a request for a shell,
+	a new one is started which handles the OS commands that arrive from the client (that is, "you").
+ </p>
+	<p>In case of an sftp request, an sftp server is started with the user's rights. So it can read, write or delete
+	files if that user is allowed to do so.
+ </p>
+ </section>
+ </section>
+
+ <section>
+ <title>In Erlang/OTP SSH</title>
+ <p>For the Erlang/OTP SSH server the situation is different. The server executes in an Erlang process
+ in the Erlang emulator which in turn executes in an OS process. The emulator does not try to change its
+ user when authenticated over the SSH protocol.
+ So the remote user name is only for authentication purposes in the Erlang/OTP SSH application.
+ </p>
+ <section>
+ <title>Password authentication in Erlang SSH</title>
+	<p>The Erlang/OTP SSH server checks the user name and password in the following order
+	(a sketch of a daemon using the first alternative is shown after the list):
+	</p>
+ <list type="ordered">
+ <item>If a
+ <seealso marker="ssh:ssh#option-pwdfun"><c>pwdfun</c></seealso>
+ is defined, that one is called and the returned boolean is the authentication result.
+ </item>
+ <item>Else, if the
+ <seealso marker="ssh:ssh#option-user_passwords"><c>user_passwords</c></seealso>
+	    option is defined and the username and the password match, the authentication is a success.
+ </item>
+ <item>Else, if the option
+ <seealso marker="ssh:ssh#option-password"><c>password</c></seealso>
+	    is defined and matches the provided password, the authentication is a success.
+ Note that the use of this option is not recommended in non-test code.
+ </item>
+ </list>
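+	<p>A minimal sketch of a daemon using a <c>pwdfun</c> (port, directory and
+	credentials are only example values):
+	</p>
+	<code>
+ssh:daemon(2222, [{system_dir, "/tmp/ssh_daemon"},
+                  {pwdfun, fun("alice", "secret") -> true;
+                              (_User, _Pwd)       -> false
+                           end}]).
+	</code>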
+ </section>
+ <section>
+ <title>Public key authentication in Erlang SSH</title>
+	<p>The user name, public key and cryptographic data (a signature) that are sent by the client are used as follows
+	(some steps are left out for clarity; a sketch of a daemon is shown after the list):
+ </p>
+ <list type="ordered">
+ <item>A callback module is selected using the options
+ <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso>.
+ </item>
+	  <item>The callback module is used to check that the provided public key is one of the user's pre-stored keys.
+ In case of the default callback module, the files <c>authorized_keys</c> and <c>authorized_keys2</c>
+ are searched in a directory found in the following order:
+ <list>
+ <item>If the option
+ <seealso marker="ssh:ssh_file#type-user_dir_fun_common_option"><c>user_dir_fun</c></seealso>
+ is defined, that fun is called and the returned directory is used,
+ </item>
+	      <item>Else, if the option
+ <seealso marker="ssh:ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ is defined, that directory is used,
+ </item>
+	      <item>Else, the subdirectory <c>.ssh</c> in the home directory of the user executing
+ the OS process of the Erlang emulator is used.
+ </item>
+ </list>
+ If the provided public key is not found, the authentication fails.
+ </item>
+ <item>Finally, if the provided public key is found, the signature provided by the client is checked with
+ the public key.
+ </item>
+ </list>
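+	<p>A sketch of a daemon that reads <c>authorized_keys</c> from a dedicated directory
+	instead of the default <c>.ssh</c> (paths are only examples):
+	</p>
+	<code>
+ssh:daemon(2222, [{system_dir, "/tmp/ssh_daemon"},
+                  {user_dir, "/tmp/ssh_daemon/users"}]).
+	</code>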
+ </section>
+ <section>
+	<title>The Erlang/OTP SSH server after a successful authentication</title>
+	<p>After a successful authentication, an <i>Erlang process</i> handles the service request from the remote
+ ssh client. The rights of that process are those of the user of the OS process running the Erlang emulator.
+ </p>
+	<p>If a shell service request arrives at the server, an <i>Erlang shell</i> is opened in the server's emulator.
+	The rights in that shell are independent of the just authenticated user.
+ </p>
+ <p>In case of an sftp request, an sftp server is started with the rights of the user of the Erlang emulator's OS
+ process. So with sftp the authenticated user does not influence the rights.
+ </p>
+ <p>So after an authentication, the user name is not used anymore and has no influence.
+ </p>
+ </section>
+ </section>
+ </section>
+</chapter>
+
diff --git a/lib/ssh/doc/src/usersguide.xml b/lib/ssh/doc/src/usersguide.xml
index 38ffa48cde..8a4df208d8 100644
--- a/lib/ssh/doc/src/usersguide.xml
+++ b/lib/ssh/doc/src/usersguide.xml
@@ -36,5 +36,6 @@
</description>
<xi:include href="introduction.xml"/>
<xi:include href="using_ssh.xml"/>
+ <xi:include href="terminology.xml"/>
<xi:include href="configure_algos.xml"/>
</part>
diff --git a/lib/ssh/doc/src/using_ssh.xml b/lib/ssh/doc/src/using_ssh.xml
index 80662e9a70..4455d5ecc5 100644
--- a/lib/ssh/doc/src/using_ssh.xml
+++ b/lib/ssh/doc/src/using_ssh.xml
@@ -74,16 +74,17 @@
<marker id="Running an Erlang ssh Daemon"></marker>
<title>Running an Erlang ssh Daemon</title>
- <p>The <c>system_dir</c> option must be a directory containing a host
- key file and it defaults to <c>/etc/ssh</c>. For details, see Section
- Configuration Files in <seealso
- marker="SSH_app">ssh(6)</seealso>.
+ <p>The
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>
+ option must be a directory containing a host key file and it defaults to <c>/etc/ssh</c>.
+ For details, see Section Configuration Files in <seealso marker="SSH_app">ssh(6)</seealso>.
</p>
<note><p>Normally, the <c>/etc/ssh</c> directory is only readable by root.</p>
</note>
- <p>The option <c>user_dir</c> defaults to directory <c>users ~/.ssh</c>.</p>
+ <p>The option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+	defaults to the user's <c>~/.ssh</c> directory.</p>
<p><em>Step 1.</em> To run the example without root privileges,
generate new keys and host keys:</p>
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index 94b9f3a196..f645201c4f 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -173,7 +173,7 @@
-type common_options() :: [ common_option() ].
-type common_option() ::
- user_dir_common_option()
+ ssh_file:user_dir_common_option()
| profile_common_option()
| max_idle_time_common_option()
| key_cb_common_option()
@@ -182,6 +182,7 @@
| ssh_msg_debug_fun_common_option()
| rekey_limit_common_option()
| id_string_common_option()
+ | pref_public_key_algs_common_option()
| preferred_algorithms_common_option()
| modify_algorithms_common_option()
| auth_methods_common_option()
@@ -191,8 +192,6 @@
-define(COMMON_OPTION, common_option()).
-
--type user_dir_common_option() :: {user_dir, false | string()}.
-type profile_common_option() :: {profile, atom() }.
-type max_idle_time_common_option() :: {idle_time, timeout()}.
-type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() |
@@ -211,6 +210,7 @@
{ssh_msg_debug_fun, fun((ssh:connection_ref(),AlwaysDisplay::boolean(),Msg::binary(),LanguageTag::binary()) -> any()) } .
-type id_string_common_option() :: {id_string, string() | random | {random,Nmin::pos_integer(),Nmax::pos_integer()} }.
+-type pref_public_key_algs_common_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-type preferred_algorithms_common_option():: {preferred_algorithms, algs_list()}.
-type modify_algorithms_common_option() :: {modify_algorithms, modify_algs_list()}.
-type auth_methods_common_option() :: {auth_methods, string() }.
@@ -223,14 +223,13 @@
{transport, {atom(),atom(),atom()} }
| {vsn, {non_neg_integer(),non_neg_integer()} }
| {tstflg, list(term())}
- | {user_dir_fun, fun()}
+ | ssh_file:user_dir_fun_common_option()
| {max_random_length_padding, non_neg_integer()} .
-type client_option() ::
- pref_public_key_algs_client_option()
- | pubkey_passphrase_client_options()
+ ssh_file:pubkey_passphrase_client_options()
| host_accepting_client_options()
| authentication_client_options()
| diffie_hellman_group_exchange_client_option()
@@ -244,12 +243,6 @@
{keyboard_interact_fun, fun((term(),term(),term()) -> term())}
| opaque_common_options().
--type pref_public_key_algs_client_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-
--type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
- | {rsa_pass_phrase, string()}
- | {ecdsa_pass_phrase, string()} .
-
-type host_accepting_client_options() ::
{silently_accept_hosts, accept_hosts()}
| {user_interaction, boolean()}
@@ -299,8 +292,9 @@
-type 'shell_fun/1'() :: fun((User::string()) -> pid()) .
-type 'shell_fun/2'() :: fun((User::string(), PeerAddr::inet:ip_address()) -> pid()).
--type exec_daemon_option() :: {exec, 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'() }.
-
+-type exec_daemon_option() :: {exec, exec_spec()} .
+-type exec_spec() :: {direct, exec_fun()} .
+-type exec_fun() :: 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'().
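+%% Hypothetical example of the new exec spec (assuming exec_result() is of the
+%% form {ok,Result} | {error,Reason}): a daemon option such as
+%%   {exec, {direct, fun(Cmd) -> {ok, string:uppercase(Cmd)} end}}
+%% lets the fun handle the received command string directly.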
-type 'exec_fun/1'() :: fun((Cmd::string()) -> exec_result()) .
-type 'exec_fun/2'() :: fun((Cmd::string(), User::string()) -> exec_result()) .
-type 'exec_fun/3'() :: fun((Cmd::string(), User::string(), ClientAddr::ip_port()) -> exec_result()) .
@@ -311,7 +305,7 @@
-type send_ext_info_daemon_option() :: {send_ext_info, boolean()} .
-type authentication_daemon_options() ::
- {system_dir, string()}
+ ssh_file:system_dir_daemon_option()
| {auth_method_kb_interactive_data, prompt_texts() }
| {user_passwords, [{UserName::string(),Pwd::string()}]}
| {password, string()}
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 4b41c10cbb..30eafc2f2a 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -983,6 +983,10 @@ handle_event(_, #ssh_msg_userauth_info_request{}, {userauth_keyboard_interactive
%%% ######## {connected, client|server} ####
+%% Skip ext_info messages in connected state (for example from OpenSSH >= 7.7)
+handle_event(_, #ssh_msg_ext_info{}, {connected,_Role}, D) ->
+ {keep_state, D};
+
handle_event(_, {#ssh_msg_kexinit{},_}, {connected,Role}, D0) ->
{KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params),
D = D0#data{ssh_params = Ssh,
diff --git a/lib/ssh/src/ssh_file.erl b/lib/ssh/src/ssh_file.erl
index 832952ed52..669b0f9be2 100644
--- a/lib/ssh/src/ssh_file.erl
+++ b/lib/ssh/src/ssh_file.erl
@@ -39,6 +39,23 @@
is_auth_key/3]).
+-export_type([system_dir_daemon_option/0,
+ user_dir_common_option/0,
+ user_dir_fun_common_option/0,
+ pubkey_passphrase_client_options/0
+ ]).
+
+-type system_dir_daemon_option() :: {system_dir, string()}.
+-type user_dir_common_option() :: {user_dir, string()}.
+-type user_dir_fun_common_option() :: {user_dir_fun, user2dir()}.
+-type user2dir() :: fun((RemoteUserName::string()) -> UserDir :: string()) .
+
+-type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
+ | {rsa_pass_phrase, string()}
+ | {ecdsa_pass_phrase, string()} .
+
+
+
-define(PERM_700, 8#700).
-define(PERM_644, 8#644).
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index bc9f2156bc..bc9b0b6eda 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -452,12 +452,6 @@ default(client) ->
class => user_options
},
- {pref_public_key_algs, def} =>
- #{default => ssh_transport:default_algorithms(public_key),
- chk => fun check_pref_public_key_algs/1,
- class => user_options
- },
-
{dh_gex_limits, def} =>
#{default => {1024, 6144, 8192}, % FIXME: Is this true nowadays?
chk => fun({Min,I,Max}) ->
@@ -523,6 +517,12 @@ default(common) ->
class => user_options
},
+ {pref_public_key_algs, def} =>
+ #{default => ssh_transport:default_algorithms(public_key),
+ chk => fun check_pref_public_key_algs/1,
+ class => user_options
+ },
+
{preferred_algorithms, def} =>
#{default => ssh:default_algorithms(),
chk => fun check_preferred_algorithms/1,
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index c5b0704925..7424c9bcaf 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -759,8 +759,7 @@ ext_info_message(#ssh{role=server,
send_ext_info=true,
opts = Opts} = Ssh0) ->
AlgsList = lists:map(fun erlang:atom_to_list/1,
- proplists:get_value(public_key,
- ?GET_OPT(preferred_algorithms, Opts))),
+ ?GET_OPT(pref_public_key_algs, Opts)),
Msg = #ssh_msg_ext_info{nr_extensions = 1,
data = [{"server-sig-algs", string:join(AlgsList,",")}]
},
diff --git a/lib/ssh/test/.gitignore b/lib/ssh/test/.gitignore
new file mode 100644
index 0000000000..c9d5f086b3
--- /dev/null
+++ b/lib/ssh/test/.gitignore
@@ -0,0 +1,5 @@
+
+
+property_test/ssh_eqc_client_server_dirs/system
+property_test/ssh_eqc_client_server_dirs/user
+
diff --git a/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl b/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
index 6d0d8f5d99..f4b521356f 100644
--- a/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
@@ -58,6 +58,7 @@
%%% Properties:
prop_seq(Config) ->
+ error_logger:tty(false),
{ok,Pid} = ssh_eqc_event_handler:add_report_handler(),
{_, _, Port} = init_daemon(Config),
numtests(1000,
@@ -66,16 +67,25 @@ prop_seq(Config) ->
send_bad_sequence(Port, Delay, Pid),
not any_relevant_error_report(Pid)
catch
- C:E -> io:format('~p:~p~n',[C,E]),
+ C:E:S -> ct:log("~p:~p~n~p",[C,E,S]),
false
end
)).
send_bad_sequence(Port, Delay, Pid) ->
- {ok,S} = gen_tcp:connect("localhost",Port,[]),
- gen_tcp:send(S,"Illegal info-string\r\n"),
- ssh_test_lib:sleep_microsec(Delay),
- gen_tcp:close(S).
+ send_bad_sequence(Port, Delay, Pid, 10).
+
+send_bad_sequence(Port, Delay, Pid, N) ->
+ case gen_tcp:connect("localhost",Port,[]) of
+ {ok,S} ->
+ gen_tcp:send(S,"Illegal info-string\r\n"),
+ ssh_test_lib:sleep_microsec(Delay),
+ gen_tcp:close(S);
+
+ {error,econnreset} when N>0 ->
+ timer:sleep(1),
+ send_bad_sequence(Port, Delay, Pid, N-1)
+ end.
any_relevant_error_report(Pid) ->
{ok, Reports} = ssh_eqc_event_handler:get_reports(Pid),
diff --git a/lib/ssh/test/property_test/ssh_eqc_client_server.erl b/lib/ssh/test/property_test/ssh_eqc_client_server.erl
index 39d0b4e410..acb0faa0c7 100644
--- a/lib/ssh/test/property_test/ssh_eqc_client_server.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_client_server.erl
@@ -22,25 +22,27 @@
-module(ssh_eqc_client_server).
-compile(export_all).
+
+-proptest([proper]).
--include_lib("common_test/include/ct.hrl").
-
--ifdef(PROPER).
-%% Proper is not supported.
--else.
--ifdef(TRIQ).
-%% Proper is not supported.
+-ifndef(PROPER).
-else.
+%% Only use proper
+%%
+%% Previously only EQC was supported, but the changes to support PROPER are not
+%% just a wrapper. Since we do not have access to eqc we can't test the changes,
+%% so eqc is disabled.
+%% However, with access to eqc it ought to be quite easy to re-enable eqc by
+%% studying the diff.
+-include_lib("proper/include/proper.hrl").
+-define(MOD_eqc,proper).
+
+-include_lib("common_test/include/ct.hrl").
%% Limit the testing time on CI server... this needs to be improved in % from total budget.
-define(TESTINGTIME(Prop), eqc:testing_time(30,Prop)).
-
--include_lib("eqc/include/eqc.hrl").
--include_lib("eqc/include/eqc_statem.hrl").
--eqc_group_commands(true).
-
-define(SSH_DIR,"ssh_eqc_client_server_dirs").
-define(sec, *1000).
@@ -51,10 +53,6 @@
port
}).
--record(conn,{ref,
- srvr_ref
- }).
-
-record(chan, {ref,
conn_ref,
subsystem,
@@ -65,7 +63,7 @@
initialized = false,
servers = [], % [#srvr{}]
clients = [],
- connections = [], % [#conn{}]
+ connections = [],
channels = [], % [#chan{}]
data_dir
}).
@@ -80,9 +78,8 @@
-define(SUBSYSTEMS, ["echo1", "echo2", "echo3", "echo4"]).
--define(SERVER_ADDRESS, { {127,1,0,choose(1,254)}, % IP
- choose(1024,65535) % Port
- }).
+-define(SERVER_ADDRESS, {127,0,0,1}). % Server listening IP. Darwin, Solaris & FreeBSD
+                                      % dislike all others in 127.0.0.0/24
-define(SERVER_EXTRA_OPTIONS, [{parallel_login,bool()}] ).
@@ -104,10 +101,12 @@
%% To be called as eqc:quickcheck( ssh_eqc_client_server:prop_seq() ).
prop_seq() ->
- ?TESTINGTIME(do_prop_seq(?SSH_DIR)).
+ error_logger:tty(false),
+ ?TESTINGTIME(do_prop_seq(?SSH_DIR)).
%% To be called from a common_test test suite
prop_seq(CT_Config) ->
+ error_logger:tty(false),
do_prop_seq(full_path(?SSH_DIR, CT_Config)).
@@ -124,10 +123,12 @@ full_path(SSHdir, CT_Config) ->
SSHdir).
%%%----
prop_parallel() ->
+ error_logger:tty(false),
?TESTINGTIME(do_prop_parallel(?SSH_DIR)).
%% To be called from a common_test test suite
prop_parallel(CT_Config) ->
+ error_logger:tty(false),
do_prop_parallel(full_path(?SSH_DIR, CT_Config)).
do_prop_parallel(DataDir) ->
@@ -139,22 +140,22 @@ do_prop_parallel(DataDir) ->
end).
%%%----
-prop_parallel_multi() ->
- ?TESTINGTIME(do_prop_parallel_multi(?SSH_DIR)).
-
-%% To be called from a common_test test suite
-prop_parallel_multi(CT_Config) ->
- do_prop_parallel_multi(full_path(?SSH_DIR, CT_Config)).
-
-do_prop_parallel_multi(DataDir) ->
- setup_rsa(DataDir),
- ?FORALL(Repetitions,?SHRINK(1,[10]),
- ?FORALL(Cmds,parallel_commands(?MODULE),
- ?ALWAYS(Repetitions,
- begin
- {H,Sf,Result} = run_parallel_commands(?MODULE,Cmds,[{data_dir,DataDir}]),
- present_result(?MODULE, Cmds, {H,Sf,Result}, Result==ok)
- end))).
+%% prop_parallel_multi() ->
+%% ?TESTINGTIME(do_prop_parallel_multi(?SSH_DIR)).
+
+%% %% To be called from a common_test test suite
+%% prop_parallel_multi(CT_Config) ->
+%% do_prop_parallel_multi(full_path(?SSH_DIR, CT_Config)).
+
+%% do_prop_parallel_multi(DataDir) ->
+%% setup_rsa(DataDir),
+%% ?FORALL(Repetitions,?SHRINK(1,[10]),
+%% ?FORALL(Cmds,parallel_commands(?MODULE),
+%% ?ALWAYS(Repetitions,
+%% begin
+%% {H,Sf,Result} = run_parallel_commands(?MODULE,Cmds,[{data_dir,DataDir}]),
+%% present_result(?MODULE, Cmds, {H,Sf,Result}, Result==ok)
+%% end))).
%%%================================================================
%%% State machine spec
@@ -169,13 +170,50 @@ initial_state(DataDir) ->
ssh:start().
%%%----------------
-weight(S, ssh_send) -> 5*length([C || C<-S#state.channels, has_subsyst(C)]);
-weight(S, ssh_start_subsyst) -> 3*length([C || C<-S#state.channels, no_subsyst(C)]);
+weight(S, ssh_send) -> 20*length([C || C<-S#state.channels, has_subsyst(C)]);
+weight(S, ssh_start_subsyst) -> 10*length([C || C<-S#state.channels, no_subsyst(C)]);
weight(S, ssh_close_channel) -> 2*length([C || C<-S#state.channels, has_subsyst(C)]);
-weight(S, ssh_open_channel) -> length(S#state.connections);
+weight(S, ssh_open_channel) -> 2*length(S#state.connections);
weight(_S, _) -> 1.
%%%----------------
+fns() -> [initial_state,
+ ssh_server,
+ ssh_client,
+ ssh_open_connection,
+ ssh_close_connection,
+ ssh_open_channel,
+ ssh_close_channel,
+ ssh_start_subsyst,
+ ssh_send
+ ].
+
+call_f(Name, Sfx) ->
+ case get({Name,Sfx}) of
+ undefined -> F = list_to_atom(lists:concat([Name,"_",Sfx])),
+ put({Name,Sfx}, F),
+ F;
+ F when is_atom(F) -> F
+ end.
+
+-define(call(Name, What, Args), apply(?MODULE, call_f(Name,What), Args)).
+
+symbolic_call(S,Name) -> {call, ?MODULE, Name, ?call(Name,args,[S])}.
+
+may_generate(S, F) -> ?call(F,pre,[S]).
+
+command(S) ->
+ frequency([{weight(S,F), symbolic_call(S,F)} || F <- fns(),
+ may_generate(S, F)]
+ ).
+
+precondition(S, {call,_M,F,As}) -> try ?call(F, pre, [S,As])
+ catch _:undef -> try ?call(F,pre,[S]) catch _:undef -> true end
+ end.
+next_state(S, Res, {call,_M,F,As}) -> try ?call(F, next, [S,Res,As]) catch _:undef -> S end.
+postcondition(S, {call,_M,F,As}, Res) -> try ?call(F, post, [S,As,Res]) catch _:undef -> true end.
+
+%%%----------------
%%% Initialize
initial_state_pre(S) -> not S#state.initialized.
@@ -200,24 +238,34 @@ ssh_server_pre(S) -> S#state.initialized andalso
ssh_server_args(_) -> [?SERVER_ADDRESS, {var,data_dir}, ?SERVER_EXTRA_OPTIONS].
-ssh_server({IP,Port}, DataDir, ExtraOptions) ->
- ok(ssh:daemon(IP, Port,
- [
- {system_dir, system_dir(DataDir)},
- {user_dir, user_dir(DataDir)},
- {subsystems, [{SS, {ssh_eqc_subsys, [SS]}} || SS <- ?SUBSYSTEMS]}
- | ExtraOptions
- ])).
-
-ssh_server_post(_S, _Args, {error,eaddrinuse}) -> true;
-ssh_server_post(_S, _Args, Result) -> is_ok(Result).
-
-ssh_server_next(S, {error,eaddrinuse}, _) -> S;
-ssh_server_next(S, Result, [{IP,Port},_,_]) ->
- S#state{servers=[#srvr{ref = Result,
- address = IP,
- port = Port}
- | S#state.servers]}.
+ssh_server(IP0, DataDir, ExtraOptions) ->
+ case ssh:daemon(IP0, 0,
+ [
+ {system_dir, system_dir(DataDir)},
+ {user_dir, user_dir(DataDir)},
+ {subsystems, [{SS, {ssh_eqc_subsys, [SS]}} || SS <- ?SUBSYSTEMS]}
+ | ExtraOptions
+ ]) of
+ {ok,DaemonRef} ->
+ case ssh:daemon_info(DaemonRef) of
+ {ok, Props} ->
+ Port = proplists:get_value(port,Props),
+ IP = proplists:get_value(ip,Props),
+ #srvr{ref = DaemonRef,
+ address = IP,
+ port = Port};
+ Other ->
+ Other
+ end;
+ Other ->
+ Other
+ end.
+
+ssh_server_post(_S, _Args, #srvr{port=Port}) -> (0 < Port) andalso (Port < 65536);
+ssh_server_post(_S, _Args, _) -> false.
+
+ssh_server_next(S, Srvr, _) ->
+ S#state{servers=[Srvr | S#state.servers]}.
%%%----------------
%%% Start a new client
@@ -271,8 +319,7 @@ ssh_open_connection(#srvr{address=Ip, port=Port}, DataDir) ->
ssh_open_connection_post(_S, _Args, Result) -> is_ok(Result).
-ssh_open_connection_next(S, ConnRef, [#srvr{ref=SrvrRef},_]) ->
- S#state{connections=[#conn{ref=ConnRef, srvr_ref=SrvrRef}|S#state.connections]}.
+ssh_open_connection_next(S, ConnRef, [_,_]) -> S#state{connections=[ConnRef|S#state.connections]}.
%%%----------------
%%% Stop a new connection
@@ -282,12 +329,12 @@ ssh_close_connection_pre(S) -> S#state.connections /= [].
ssh_close_connection_args(S) -> [oneof(S#state.connections)].
-ssh_close_connection(#conn{ref=ConnectionRef}) -> ssh:close(ConnectionRef).
+ssh_close_connection(ConnectionRef) -> ssh:close(ConnectionRef).
-ssh_close_connection_next(S, _, [Conn=#conn{ref=ConnRef}]) ->
- S#state{connections = S#state.connections--[Conn],
- channels = [C || C <- S#state.channels,
- C#chan.conn_ref /= ConnRef]
+ssh_close_connection_next(S, _, [ConnRef]) ->
+ S#state{connections = S#state.connections--[ConnRef],
+ channels = [C || C <- S#state.channels,
+ C#chan.conn_ref /= ConnRef]
}.
%%%----------------
@@ -299,14 +346,14 @@ ssh_open_channel_pre(S) -> S#state.connections /= [].
ssh_open_channel_args(S) -> [oneof(S#state.connections)].
%%% For re-arrangement in parallel tests.
-ssh_open_channel_pre(S,[C]) -> lists:member(C,S#state.connections).
+ssh_open_channel_pre(S,[C]) when is_record(S,state) -> lists:member(C,S#state.connections).
-ssh_open_channel(#conn{ref=ConnectionRef}) ->
+ssh_open_channel(ConnectionRef) ->
ok(ssh_connection:session_channel(ConnectionRef, 20?sec)).
ssh_open_channel_post(_S, _Args, Result) -> is_ok(Result).
-ssh_open_channel_next(S, ChannelRef, [#conn{ref=ConnRef}]) ->
+ssh_open_channel_next(S, ChannelRef, [ConnRef]) ->
S#state{channels=[#chan{ref=ChannelRef,
conn_ref=ConnRef}
| S#state.channels]}.
@@ -326,9 +373,7 @@ ssh_close_channel_next(S, _, [C]) ->
S#state{channels = [Ci || Ci <- S#state.channels,
sig(C) /= sig(Ci)]}.
-
sig(C) -> {C#chan.ref, C#chan.conn_ref}.
-
%%%----------------
%%% Start a sub system on a channel
@@ -361,9 +406,10 @@ ssh_start_subsyst_next(S, _Result, [C,SS,Pid|_]) ->
ssh_send_pre(S) -> lists:any(fun has_subsyst/1, S#state.channels).
-ssh_send_args(S) -> [oneof(lists:filter(fun has_subsyst/1, S#state.channels)),
- choose(0,1),
- message()].
+ssh_send_args(S) ->
+ [oneof(lists:filter(fun has_subsyst/1, S#state.channels)),
+ choose(0,1),
+ message()].
%% For re-arrangement in parallel tests.
ssh_send_pre(S, [C|_]) -> lists:member(C, S#state.channels).
@@ -388,17 +434,17 @@ ssh_send(C=#chan{conn_ref=ConnectionRef, ref=ChannelRef, client_pid=Pid}, Type,
end).
ssh_send_blocking(_S, _Args) ->
- true.
+ true.
ssh_send_post(_S, [C,_,Msg], Response) when is_binary(Response) ->
- Expected = ssh_eqc_subsys:response(modify_msg(C,Msg), C#chan.subsystem),
+ Expected = ssh_eqc_subsys:response(modify_msg(C,Msg), C#chan.subsystem),
case Response of
Expected -> true;
_ -> {send_failed, size(Response), size(Expected)}
end;
ssh_send_post(_S, _Args, Response) ->
- {error,Response}.
+ {error,Response}.
modify_msg(_, <<>>) -> <<>>;
@@ -440,7 +486,11 @@ present_result(_Module, Cmds, _Triple, true) ->
true)))));
present_result(Module, Cmds, Triple, false) ->
- pretty_commands(Module, Cmds, Triple, [{show_states,true}], false).
+ pretty_comands(Module, Cmds, Triple, [{show_states,true}], false),
+ false. % Proper dislikes non-boolean results while eqc treats non-true as false.
+
+pretty_comands(Module, Cmds, Triple, Opts, Bool) ->
+ ct:log("Module = ~p,~n Cmds = ~p,~n Triple = ~p,~n Opts = ~p,~n Bool = ~p",[Module, Cmds, Triple, Opts, Bool]).
@@ -476,23 +526,35 @@ traverse_commands(Fseq, Fpar, {Seq, ParLs}) -> lists:append([Fseq(Seq)|Fpar(ParL
print_frequencies() -> print_frequencies(10).
print_frequencies(Ngroups) -> fun([]) -> io:format('Empty list!~n',[]);
- (L ) -> print_frequencies(L,Ngroups,0,element(1,lists:last(L)))
+ (L ) ->
+ try
+ M = lists:last(L),
+ Max = if is_integer(M) -> M;
+                                          is_tuple(M) -> element(1,M)
+ end,
+ print_frequencies(L,Ngroups,0,Max)
+ catch
+ C:E:S ->
+ ct:pal("~p:~p ~p:~p~n~p~n~p",[?MODULE,?LINE,C,E,S,L])
+ end
end.
+
print_frequencies(Ngroups, MaxValue) -> fun(L) -> print_frequencies(L,Ngroups,0,MaxValue) end.
print_frequencies(L, N, Min, Max) when N>Max -> print_frequencies(L++[{N,0}], N, Min, N);
-print_frequencies(L, N, Min, Max) ->
-%%io:format('L=~p~n',[L]),
+print_frequencies(L, N, Min, Max0) ->
try
+ Interval = round((Max0-Min)/N),
+ Max = Max0 + (Max0 rem Interval),
IntervalUpperLimits =
lists:reverse(
- [Max | tl(lists:reverse(lists:seq(Min,Max,round((Max-Min)/N))))]
+ [Max | tl(lists:reverse(lists:seq(Min,Max,Interval)))]
),
{Acc0,_} = lists:mapfoldl(fun(Upper,Lower) ->
{{{Lower,Upper},0}, Upper+1}
end, hd(IntervalUpperLimits), tl(IntervalUpperLimits)),
- Fs0 = get_frequencies(L, Acc0),
+ Fs0 = get_frequencies(L, Acc0),
SumVal = lists:sum([V||{_,V}<-Fs0]),
Fs = with_percentage(Fs0, SumVal),
Mean = mean(L),
@@ -517,7 +579,6 @@ print_frequencies(L, N, Min, Max) ->
|| {Interval={Rlow,Rhigh},Val,Percent} <- Fs],
io:format('~*c ~*c~n',[2*Npos_range,32,Npos_value+2,$-]),
io:format('~*c ~*w~n',[2*Npos_range,32,Npos_value,SumVal])
- %%,io:format('L=~p~n',[L])
catch
C:E ->
io:format('*** Faild printing (~p:~p) for~n~p~n',[C,E,L])
@@ -527,6 +588,8 @@ get_frequencies([{I,Num}|T], [{{Lower,Upper},Cnt}|Acc]) when Lower=<I,I=<Upper -
get_frequencies(T, [{{Lower,Upper},Cnt+Num}|Acc]);
get_frequencies(L=[{I,_Num}|_], [Ah={{_Lower,Upper},_Cnt}|Acc]) when I>Upper ->
[Ah | get_frequencies(L,Acc)];
+get_frequencies([I|T], Acc) when is_integer(I) ->
+ get_frequencies([{I,1}|T], Acc);
get_frequencies([], Acc) ->
Acc.
@@ -616,4 +679,3 @@ erase_dir(Dir) ->
file:del_dir(Dir).
-endif.
--endif.
diff --git a/lib/ssh/test/ssh_compat_SUITE.erl b/lib/ssh/test/ssh_compat_SUITE.erl
index 1c607bebe8..af85ef7aee 100644
--- a/lib/ssh/test/ssh_compat_SUITE.erl
+++ b/lib/ssh/test/ssh_compat_SUITE.erl
@@ -648,6 +648,7 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
{silently_accept_hosts,true},
{user_interaction,false}
]),
+ rm_id_in_remote_dir(Ch, ".ssh"),
_ = ssh_sftp:make_dir(Ch, ".ssh"),
DstFile = filename:join(".ssh", dst_filename(user,KeyAlg)),
ok = ssh_sftp:write_file(Ch, DstFile, Priv),
@@ -658,6 +659,18 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
ok = ssh:close(Cc),
UserDir.
+rm_id_in_remote_dir(Ch, Dir) ->
+ case ssh_sftp:list_dir(Ch, Dir) of
+ {error,_Error} ->
+ ok;
+ {ok,FileNames} ->
+ lists:foreach(fun("id_"++_ = F) ->
+ ok = ssh_sftp:delete(Ch, filename:join(Dir,F));
+ (_) ->
+ leave
+ end, FileNames)
+ end.
+
user_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("users_keys", user, Config, KeyAlg).
host_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("host_keys", host, Config, KeyAlg).
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
index 0dcf8cb570..c2e77fcc79 100755
--- a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
+++ b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
@@ -18,6 +18,12 @@ SSH_SSL_VERSIONS=(\
openssh 7.6p1 openssl 1.0.2n \
\
openssh 7.6p1 libressl 2.6.4 \
+ \
+ openssh 7.7p1 openssl 1.0.2p \
+ openssh 7.8p1 openssl 1.0.2p \
+ openssh 7.9p1 openssl 1.0.2p \
+ \
+ openssh 7.9p1 libressl 2.6.4 \
)
if [ "x$1" == "x-b" ]
diff --git a/lib/ssh/test/ssh_property_test_SUITE.erl b/lib/ssh/test/ssh_property_test_SUITE.erl
index 3318b86d39..9aaac898a0 100644
--- a/lib/ssh/test/ssh_property_test_SUITE.erl
+++ b/lib/ssh/test/ssh_property_test_SUITE.erl
@@ -46,8 +46,9 @@ groups() ->
[{messages, [], [decode,
decode_encode]},
{client_server, [], [client_server_sequential,
- client_server_parallel,
- client_server_parallel_multi]}
+ client_server_parallel
+ %% client_server_parallel_multi
+ ]}
].
@@ -62,7 +63,7 @@ end_per_suite(Config) ->
%%% if we run proper.
init_per_group(client_server, Config) ->
case proplists:get_value(property_test_tool,Config) of
- eqc -> Config;
+ proper -> Config;
X -> {skip, lists:concat([X," is not supported"])}
end;
init_per_group(_, Config) ->
diff --git a/lib/ssl/doc/src/notes.xml b/lib/ssl/doc/src/notes.xml
index 673431ed0a..46fd8ab180 100644
--- a/lib/ssl/doc/src/notes.xml
+++ b/lib/ssl/doc/src/notes.xml
@@ -333,6 +333,38 @@
</section>
+<section><title>SSL 8.2.6.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Add engine support for RSA key exchange</p>
+ <p>
+ Own Id: OTP-15420</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>SSL 8.2.6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+	    Extend the check for undelivered data at closing; it could under
+	    some circumstances fail to deliver all data that was
+	    actually received.</p>
+ <p>
+ Own Id: OTP-15412</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>SSL 8.2.6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -3163,5 +3195,3 @@
</section>
</section>
</chapter>
-
-
diff --git a/lib/ssl/src/dtls_record.erl b/lib/ssl/src/dtls_record.erl
index 9eb0d8e2d7..b7346d3ec8 100644
--- a/lib/ssl/src/dtls_record.erl
+++ b/lib/ssl/src/dtls_record.erl
@@ -499,23 +499,22 @@ encode_dtls_cipher_text(Type, {MajVer, MinVer}, Fragment,
WriteState#{sequence_number => Seq + 1}}.
encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
+ cipher_state := CipherS0,
epoch := Epoch,
sequence_number := Seq,
- cipher_state := CipherS0,
security_parameters :=
#security_parameters{
cipher_type = ?AEAD,
- bulk_cipher_algorithm =
- BulkCipherAlgo,
+ bulk_cipher_algorithm = BCAlg,
compression_algorithm = CompAlg}
} = WriteState0) ->
{Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0),
- AAD = calc_aad(Type, Version, Epoch, Seq),
+ AAD = start_additional_data(Type, Version, Epoch, Seq),
+ CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0),
+ WriteState = WriteState0#{compression_state => CompS1,
+ cipher_state => CipherS},
TLSVersion = dtls_v1:corresponding_tls_version(Version),
- {CipherFragment, CipherS1} =
- ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, Comp, TLSVersion),
- {CipherFragment, WriteState0#{compression_state => CompS1,
- cipher_state => CipherS1}};
+ ssl_record:cipher_aead(TLSVersion, Comp, WriteState, AAD);
encode_plain_text(Type, Version, Fragment, #{compression_state := CompS0,
epoch := Epoch,
sequence_number := Seq,
@@ -547,9 +546,10 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version,
BulkCipherAlgo,
compression_algorithm = CompAlg}} = ReadState0,
ConnnectionStates0) ->
- AAD = calc_aad(Type, Version, Epoch, Seq),
+ AAD = start_additional_data(Type, Version, Epoch, Seq),
+ CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0),
TLSVersion = dtls_v1:corresponding_tls_version(Version),
- case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, TLSVersion) of
+ case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, TLSVersion) of
{PlainFragment, CipherState} ->
{Plain, CompressionS1} = ssl_record:uncompress(CompAlg,
PlainFragment, CompressionS0),
@@ -600,7 +600,7 @@ mac_hash({Major, Minor}, MacAlg, MacSecret, Epoch, SeqNo, Type, Length, Fragment
Fragment],
dtls_v1:hmac_hash(MacAlg, MacSecret, Value).
-calc_aad(Type, {MajVer, MinVer}, Epoch, SeqNo) ->
+start_additional_data(Type, {MajVer, MinVer}, Epoch, SeqNo) ->
<<?UINT16(Epoch), ?UINT48(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>.
%%--------------------------------------------------------------------
diff --git a/lib/ssl/src/inet_tls_dist.erl b/lib/ssl/src/inet_tls_dist.erl
index ca059603ae..a4f8bb7562 100644
--- a/lib/ssl/src/inet_tls_dist.erl
+++ b/lib/ssl/src/inet_tls_dist.erl
@@ -567,7 +567,7 @@ gen_close(Driver, Socket) ->
get_address_resolver(EpmdModule, Driver) ->
case erlang:function_exported(EpmdModule, address_please, 3) of
true -> {EpmdModule, address_please};
- _ -> {Driver, getaddr}
+ _ -> {erl_epmd, address_please}
end.
%% ------------------------------------------------------------
diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl
index b23129dcdd..54c04c13e5 100644
--- a/lib/ssl/src/ssl_cipher.erl
+++ b/lib/ssl/src/ssl_cipher.erl
@@ -34,7 +34,7 @@
-include_lib("public_key/include/public_key.hrl").
-export([security_parameters/2, security_parameters/3,
- cipher_init/3, decipher/6, cipher/5, decipher_aead/6, cipher_aead/6,
+ cipher_init/3, nonce_seed/2, decipher/6, cipher/5, aead_encrypt/5, aead_decrypt/6,
suites/1, all_suites/1, crypto_support_filters/0,
chacha_suites/1, anonymous_suites/1, psk_suites/1, psk_suites_anon/1,
srp_suites/0, srp_suites_anon/0,
@@ -48,6 +48,8 @@
-type cipher_enum() :: integer().
+-export_type([cipher_enum/0]).
+
%%--------------------------------------------------------------------
-spec security_parameters(ssl_cipher_format:cipher_suite(), #security_parameters{}) ->
#security_parameters{}.
@@ -91,10 +93,15 @@ cipher_init(?RC4, IV, Key) ->
#cipher_state{iv = IV, key = Key, state = State};
cipher_init(?AES_GCM, IV, Key) ->
<<Nonce:64>> = random_bytes(8),
- #cipher_state{iv = IV, key = Key, nonce = Nonce};
+ #cipher_state{iv = IV, key = Key, nonce = Nonce, tag_len = 16};
+cipher_init(?CHACHA20_POLY1305, IV, Key) ->
+ #cipher_state{iv = IV, key = Key, tag_len = 16};
cipher_init(_BCA, IV, Key) ->
#cipher_state{iv = IV, key = Key}.
+nonce_seed(Seed, CipherState) ->
+ CipherState#cipher_state{nonce = Seed}.
+
%%--------------------------------------------------------------------
-spec cipher(cipher_enum(), #cipher_state{}, binary(), iodata(), ssl_record:ssl_version()) ->
{binary(), #cipher_state{}}.
@@ -126,32 +133,16 @@ cipher(?AES_CBC, CipherState, Mac, Fragment, Version) ->
crypto:block_encrypt(aes_cbc256, Key, IV, T)
end, block_size(aes_128_cbc), CipherState, Mac, Fragment, Version).
-%%--------------------------------------------------------------------
--spec cipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), iodata(), ssl_record:ssl_version()) ->
- {binary(), #cipher_state{}}.
-%%
-%% Description: Encrypts the data and protects associated data (AAD) using chipher
-%% described by cipher_enum() and updating the cipher state
-%% Use for suites that use authenticated encryption with associated data (AEAD)
-%%-------------------------------------------------------------------
-cipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_cipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version);
-cipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_cipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version).
-
-aead_cipher(chacha20_poly1305, #cipher_state{key=Key} = CipherState, SeqNo, AAD0, Fragment, _Version) ->
- CipherLen = erlang:iolist_size(Fragment),
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = ?uint64(SeqNo),
- {Content, CipherTag} = crypto:block_encrypt(chacha20_poly1305, Key, Nonce, {AAD, Fragment}),
- {<<Content/binary, CipherTag/binary>>, CipherState};
-aead_cipher(Type, #cipher_state{key=Key, iv = IV0, nonce = Nonce} = CipherState, _SeqNo, AAD0, Fragment, _Version) ->
- CipherLen = erlang:iolist_size(Fragment),
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- <<Salt:4/bytes, _/binary>> = IV0,
- IV = <<Salt/binary, Nonce:64/integer>>,
- {Content, CipherTag} = crypto:block_encrypt(Type, Key, IV, {AAD, Fragment}),
- {<<Nonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = Nonce + 1}}.
+aead_encrypt(Type, Key, Nonce, Fragment, AdditionalData) ->
+ crypto:block_encrypt(aead_type(Type), Key, Nonce, {AdditionalData, Fragment}).
+
+aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AdditionalData) ->
+ crypto:block_decrypt(aead_type(Type), Key, Nonce, {AdditionalData, CipherText, CipherTag}).
+
+aead_type(?AES_GCM) ->
+ aes_gcm;
+aead_type(?CHACHA20_POLY1305) ->
+ chacha20_poly1305.
build_cipher_block(BlockSz, Mac, Fragment) ->
TotSz = byte_size(Mac) + erlang:iolist_size(Fragment) + 1,
@@ -218,19 +209,6 @@ decipher(?AES_CBC, HashSz, CipherState, Fragment, Version, PaddingCheck) ->
crypto:block_decrypt(aes_cbc256, Key, IV, T)
end, CipherState, HashSz, Fragment, Version, PaddingCheck).
-%%--------------------------------------------------------------------
--spec decipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), binary(), ssl_record:ssl_version()) ->
- {binary(), #cipher_state{}} | #alert{}.
-%%
-%% Description: Decrypts the data and checks the associated data (AAD) MAC using
-%% cipher described by cipher_enum() and updating the cipher state.
-%% Use for suites that use authenticated encryption with associated data (AEAD)
-%%-------------------------------------------------------------------
-decipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_decipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version);
-decipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_decipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version).
-
block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0,
HashSz, Fragment, Version, PaddingCheck) ->
try
@@ -261,34 +239,6 @@ block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0,
?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
end.
-aead_ciphertext_to_state(chacha20_poly1305, SeqNo, _IV, AAD0, Fragment, _Version) ->
- CipherLen = size(Fragment) - 16,
- <<CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment,
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = ?uint64(SeqNo),
- {Nonce, AAD, CipherText, CipherTag};
-aead_ciphertext_to_state(_, _SeqNo, <<Salt:4/bytes, _/binary>>, AAD0, Fragment, _Version) ->
- CipherLen = size(Fragment) - 24,
- <<ExplicitNonce:8/bytes, CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment,
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = <<Salt/binary, ExplicitNonce/binary>>,
- {Nonce, AAD, CipherText, CipherTag}.
-
-aead_decipher(Type, #cipher_state{key = Key, iv = IV} = CipherState,
- SeqNo, AAD0, Fragment, Version) ->
- try
- {Nonce, AAD, CipherText, CipherTag} = aead_ciphertext_to_state(Type, SeqNo, IV, AAD0, Fragment, Version),
- case crypto:block_decrypt(Type, Key, Nonce, {AAD, CipherText, CipherTag}) of
- Content when is_binary(Content) ->
- {Content, CipherState};
- _ ->
- ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
- end
- catch
- _:_ ->
- ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
- end.
-
%%--------------------------------------------------------------------
-spec suites(ssl_record:ssl_version()) -> [ssl_cipher_format:cipher_suite()].
%%
@@ -982,7 +932,7 @@ filter_suites_pubkey(ec, Ciphers, _, OtpCert) ->
ec_ecdhe_suites(Ciphers)),
filter_keyuse_suites(keyAgreement, Uses, CiphersSuites, ec_ecdh_suites(Ciphers)).
-filter_suites_signature(rsa, Ciphers, {3, N}) when N >= 3 ->
+filter_suites_signature(_, Ciphers, {3, N}) when N >= 3 ->
Ciphers;
filter_suites_signature(rsa, Ciphers, Version) ->
(Ciphers -- ecdsa_signed_suites(Ciphers, Version)) -- dsa_signed_suites(Ciphers, Version);
diff --git a/lib/ssl/src/ssl_cipher.hrl b/lib/ssl/src/ssl_cipher.hrl
index ba6a98b92a..2371e8bd32 100644
--- a/lib/ssl/src/ssl_cipher.hrl
+++ b/lib/ssl/src/ssl_cipher.hrl
@@ -48,7 +48,8 @@
iv,
key,
state,
- nonce
+ nonce,
+ tag_len
}).
%%% TLS_NULL_WITH_NULL_NULL is specified and is the initial state of a
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 9f876add6c..acd9f14f7b 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -621,8 +621,10 @@ init({call, From}, {start, {Opts, EmOpts}, Timeout},
catch throw:Error ->
stop_and_reply(normal, {reply, From, {error, Error}}, State0)
end;
-init({call, From}, Msg, State, Connection) ->
+init({call, From}, {new_user, _} = Msg, State, Connection) ->
handle_call(Msg, From, ?FUNCTION_NAME, State, Connection);
+init({call, From}, _Msg, _State, _Connection) ->
+ {keep_state_and_data, [{reply, From, {error, notsup_on_transport_accept_socket}}]};
init(_Type, _Event, _State, _Connection) ->
{keep_state_and_data, [postpone]}.
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index dc89fb0029..14df1d2e02 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -925,6 +925,13 @@ premaster_secret(EncSecret, #'RSAPrivateKey'{} = RSAPrivateKey) ->
catch
_:_ ->
throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
+ end;
+premaster_secret(EncSecret, #{algorithm := rsa} = Engine) ->
+ try crypto:private_decrypt(rsa, EncSecret, maps:remove(algorithm, Engine),
+ [{rsa_pad, rsa_pkcs1_padding}])
+ catch
+ _:_ ->
+ throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
end.
%%====================================================================
%% Extensions handling
diff --git a/lib/ssl/src/ssl_pem_cache.erl b/lib/ssl/src/ssl_pem_cache.erl
index b7d23ef01e..41bca2f7b5 100644
--- a/lib/ssl/src/ssl_pem_cache.erl
+++ b/lib/ssl/src/ssl_pem_cache.erl
@@ -45,7 +45,7 @@
-record(state, {
pem_cache,
- last_pem_check :: erlang:timestamp(),
+ last_pem_check :: integer(),
clear :: integer()
}).
@@ -134,8 +134,9 @@ init([Name]) ->
PemCache = ssl_pkix_db:create_pem_cache(Name),
Interval = pem_check_interval(),
erlang:send_after(Interval, self(), clear_pem_cache),
+ erlang:system_time(second),
{ok, #state{pem_cache = PemCache,
- last_pem_check = os:timestamp(),
+ last_pem_check = erlang:convert_time_unit(os:system_time(), native, second),
clear = Interval
}}.
@@ -183,7 +184,7 @@ handle_cast({invalidate_pem, File}, #state{pem_cache = Db} = State) ->
handle_info(clear_pem_cache, #state{pem_cache = PemCache,
clear = Interval,
last_pem_check = CheckPoint} = State) ->
- NewCheckPoint = os:timestamp(),
+ NewCheckPoint = erlang:convert_time_unit(os:system_time(), native, second),
start_pem_cache_validator(PemCache, CheckPoint),
erlang:send_after(Interval, self(), clear_pem_cache),
{noreply, State#state{last_pem_check = NewCheckPoint}};
@@ -229,24 +230,14 @@ init_pem_cache_validator([CacheName, PemCache, CheckPoint]) ->
CheckPoint, PemCache).
pem_cache_validate({File, _}, CheckPoint) ->
- case file:read_file_info(File, []) of
- {ok, #file_info{mtime = Time}} ->
- case is_before_checkpoint(Time, CheckPoint) of
- true ->
- ok;
- false ->
- invalidate_pem(File)
- end;
+ case file:read_file_info(File, [{time, posix}]) of
+ {ok, #file_info{mtime = Time}} when Time < CheckPoint ->
+ ok;
_ ->
invalidate_pem(File)
end,
CheckPoint.
-is_before_checkpoint(Time, CheckPoint) ->
- calendar:datetime_to_gregorian_seconds(
- calendar:now_to_datetime(CheckPoint)) -
- calendar:datetime_to_gregorian_seconds(Time) > 0.
-
pem_check_interval() ->
case application:get_env(ssl, ssl_pem_cache_clean) of
{ok, Interval} when is_integer(Interval) ->
diff --git a/lib/ssl/src/ssl_record.erl b/lib/ssl/src/ssl_record.erl
index 659e1485ac..b9d1320ef3 100644
--- a/lib/ssl/src/ssl_record.erl
+++ b/lib/ssl/src/ssl_record.erl
@@ -45,7 +45,7 @@
-export([compress/3, uncompress/3, compressions/0]).
%% Payload encryption/decryption
--export([cipher/4, decipher/4, cipher_aead/4, is_correct_mac/2]).
+-export([cipher/4, decipher/4, cipher_aead/4, decipher_aead/5, is_correct_mac/2, nonce_seed/3]).
-export_type([ssl_version/0, ssl_atom_version/0, connection_states/0, connection_state/0]).
@@ -306,22 +306,20 @@ cipher(Version, Fragment,
{CipherFragment, CipherS1} =
ssl_cipher:cipher(BulkCipherAlgo, CipherS0, MacHash, Fragment, Version),
{CipherFragment, WriteState0#{cipher_state => CipherS1}}.
-%% %%--------------------------------------------------------------------
-%% -spec cipher_aead(ssl_version(), iodata(), connection_state(), MacHash::binary()) ->
-%% {CipherFragment::binary(), connection_state()}.
-%% %%
-%% %% Description: Payload encryption
+%%--------------------------------------------------------------------
+-spec cipher_aead(ssl_version(), iodata(), connection_state(), AAD::binary()) ->
+ {CipherFragment::binary(), connection_state()}.
+
+%% Description: Payload encryption
%% %%--------------------------------------------------------------------
cipher_aead(Version, Fragment,
#{cipher_state := CipherS0,
- sequence_number := SeqNo,
security_parameters :=
#security_parameters{bulk_cipher_algorithm =
BulkCipherAlgo}
} = WriteState0, AAD) ->
-
{CipherFragment, CipherS1} =
- ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, SeqNo, AAD, Fragment, Version),
+ cipher_aead(BulkCipherAlgo, CipherS0, AAD, Fragment, Version),
{CipherFragment, WriteState0#{cipher_state => CipherS1}}.
%%--------------------------------------------------------------------
@@ -344,10 +342,39 @@ decipher(Version, CipherFragment,
#alert{} = Alert ->
Alert
end.
+%%--------------------------------------------------------------------
+-spec decipher_aead(ssl_cipher:cipher_enum(), #cipher_state{},
+ binary(), binary(), ssl_record:ssl_version()) ->
+ {binary(), #cipher_state{}} | #alert{}.
+%%
+%% Description: Decrypts the data and checks the associated data (AAD) MAC using
+%% cipher described by cipher_enum() and updating the cipher state.
+%% Use for suites that use authenticated encryption with associated data (AEAD)
+%%-------------------------------------------------------------------
+decipher_aead(Type, #cipher_state{key = Key} = CipherState, AAD0, CipherFragment, _) ->
+ try
+ Nonce = decrypt_nonce(Type, CipherState, CipherFragment),
+ {AAD, CipherText, CipherTag} = aead_ciphertext_split(Type, CipherState, CipherFragment, AAD0),
+ case ssl_cipher:aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AAD) of
+ Content when is_binary(Content) ->
+ {Content, CipherState};
+ _ ->
+ ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
+ end
+ catch
+ _:_ ->
+ ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
+ end.
+
+nonce_seed(?CHACHA20_POLY1305, Seed, CipherState) ->
+ ssl_cipher:nonce_seed(Seed, CipherState);
+nonce_seed(_,_, CipherState) ->
+ CipherState.
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+
empty_connection_state(ConnectionEnd, BeastMitigation) ->
SecParams = empty_security_params(ConnectionEnd),
#{security_parameters => SecParams,
@@ -400,3 +427,37 @@ initial_security_params(ConnectionEnd) ->
compression_algorithm = ?NULL},
ssl_cipher:security_parameters(?TLS_NULL_WITH_NULL_NULL, SecParams).
+cipher_aead(?CHACHA20_POLY1305 = Type, #cipher_state{key=Key} = CipherState, AAD0, Fragment, _Version) ->
+ AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)),
+ Nonce = encrypt_nonce(Type, CipherState),
+ {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD),
+ {<<Content/binary, CipherTag/binary>>, CipherState};
+cipher_aead(Type, #cipher_state{key=Key, nonce = ExplicitNonce} = CipherState, AAD0, Fragment, _Version) ->
+ AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)),
+ Nonce = encrypt_nonce(Type, CipherState),
+ {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD),
+ {<<ExplicitNonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = ExplicitNonce + 1}}.
+
+encrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}) ->
+ crypto:exor(<<?UINT32(0), Nonce/binary>>, IV);
+encrypt_nonce(?AES_GCM, #cipher_state{iv = IV, nonce = ExplicitNonce}) ->
+ <<Salt:4/bytes, _/binary>> = IV,
+ <<Salt/binary, ExplicitNonce:64/integer>>.
+
+decrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}, _) ->
+ crypto:exor(<<Nonce:96/unsigned-big-integer>>, IV);
+decrypt_nonce(?AES_GCM, #cipher_state{iv = <<Salt:4/bytes, _/binary>>}, <<ExplicitNonce:8/bytes, _/binary>>) ->
+ <<Salt/binary, ExplicitNonce/binary>>.
+
+aead_ciphertext_split(?CHACHA20_POLY1305, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) ->
+ CipherLen = size(CipherTextFragment) - Len,
+ <<CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment,
+ {end_additional_data(AAD, CipherLen), CipherText, CipherTag};
+aead_ciphertext_split(?AES_GCM, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) ->
+ CipherLen = size(CipherTextFragment) - (Len + 8), %% 8 is length of explicit Nonce
+ << _:8/bytes, CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment,
+ {end_additional_data(AAD, CipherLen), CipherText, CipherTag}.
+
+end_additional_data(AAD, Len) ->
+ <<AAD/binary, ?UINT16(Len)>>.
+
diff --git a/lib/ssl/src/tls_connection.erl b/lib/ssl/src/tls_connection.erl
index adb4f6d9ea..4dfb50967d 100644
--- a/lib/ssl/src/tls_connection.erl
+++ b/lib/ssl/src/tls_connection.erl
@@ -783,6 +783,7 @@ handle_info({CloseTag, Socket}, StateName,
#state{socket = Socket, close_tag = CloseTag,
socket_options = #socket_options{active = Active},
protocol_buffers = #protocol_buffers{tls_cipher_texts = CTs},
+ user_data_buffer = Buffer,
negotiated_version = Version} = State) ->
%% Note that as of TLS 1.1,
@@ -790,7 +791,7 @@ handle_info({CloseTag, Socket}, StateName,
%% session not be resumed. This is a change from TLS 1.0 to conform
%% with widespread implementation practice.
- case (Active == false) andalso (CTs =/= []) of
+ case (Active == false) andalso ((CTs =/= []) or (Buffer =/= <<>>)) of
false ->
case Version of
{1, N} when N >= 1 ->
diff --git a/lib/ssl/src/tls_record.erl b/lib/ssl/src/tls_record.erl
index f1aca8c801..ce7edc9dcd 100644
--- a/lib/ssl/src/tls_record.erl
+++ b/lib/ssl/src/tls_record.erl
@@ -176,14 +176,15 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version,
BulkCipherAlgo,
compression_algorithm = CompAlg}
} = ReadState0} = ConnnectionStates0, _) ->
- AAD = calc_aad(Type, Version, ReadState0),
- case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, Version) of
- {PlainFragment, CipherS1} ->
+ AAD = start_additional_data(Type, Version, ReadState0),
+ CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT64(Seq)>>, CipherS0),
+ case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, Version) of
+ {PlainFragment, CipherState} ->
{Plain, CompressionS1} = ssl_record:uncompress(CompAlg,
PlainFragment, CompressionS0),
ConnnectionStates = ConnnectionStates0#{
current_read => ReadState0#{
- cipher_state => CipherS1,
+ cipher_state => CipherState,
sequence_number => Seq + 1,
compression_state => CompressionS1}},
{CipherText#ssl_tls{fragment = Plain}, ConnnectionStates};
@@ -453,15 +454,20 @@ encode_iolist(Type, Data, Version, ConnectionStates0) ->
{lists:reverse(EncodedMsg), ConnectionStates}.
%%--------------------------------------------------------------------
do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
- security_parameters :=
+ cipher_state := CipherS0,
+ sequence_number := Seq,
+ security_parameters :=
#security_parameters{
cipher_type = ?AEAD,
+ bulk_cipher_algorithm = BCAlg,
compression_algorithm = CompAlg}
} = WriteState0) ->
{Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0),
- WriteState1 = WriteState0#{compression_state => CompS1},
- AAD = calc_aad(Type, Version, WriteState1),
- ssl_record:cipher_aead(Version, Comp, WriteState1, AAD);
+ CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT64(Seq)>>, CipherS0),
+ WriteState = WriteState0#{compression_state => CompS1,
+ cipher_state => CipherS},
+ AAD = start_additional_data(Type, Version, WriteState),
+ ssl_record:cipher_aead(Version, Comp, WriteState, AAD);
do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
security_parameters :=
#security_parameters{compression_algorithm = CompAlg}
@@ -473,7 +479,7 @@ do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
do_encode_plain_text(_,_,_,CS) ->
exit({cs, CS}).
%%--------------------------------------------------------------------
-calc_aad(Type, {MajVer, MinVer},
+start_additional_data(Type, {MajVer, MinVer},
#{sequence_number := SeqNo}) ->
<<?UINT64(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>.
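start_additional_data/3 above builds the first 11 bytes of the TLS 1.2 AEAD additional data (sequence number, record type, protocol version); end_additional_data/2 in the cipher code earlier appends the 16-bit plaintext length, completing the 13-byte AAD. A hedged shell illustration for an application_data record (type 23) in TLS 1.2 ({3,3}) with sequence number 1 and a 5-byte fragment (values are made up):

    1> Start = <<1:64, 23, 3, 3>>.
    <<0,0,0,0,0,0,0,1,23,3,3>>
    2> <<Start/binary, 5:16>>.    % what end_additional_data(Start, 5) produces
    <<0,0,0,0,0,0,0,1,23,3,3,0,5>>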
diff --git a/lib/ssl/src/tls_sender.erl b/lib/ssl/src/tls_sender.erl
index 8d1938cee7..a245ee2465 100644
--- a/lib/ssl/src/tls_sender.erl
+++ b/lib/ssl/src/tls_sender.erl
@@ -319,7 +319,7 @@ handle_info({'DOWN', Monitor, _, _, _}, _,
#data{connection_monitor = Monitor} = StateData) ->
{stop, normal, StateData};
handle_info(_,_,_) ->
- {keep_state_and_data}.
+ keep_state_and_data.
send_tls_alert(Alert, #data{negotiated_version = Version,
socket = Socket,
diff --git a/lib/ssl/test/ssl_ECC_SUITE.erl b/lib/ssl/test/ssl_ECC_SUITE.erl
index c93f066825..a5309e866b 100644
--- a/lib/ssl/test/ssl_ECC_SUITE.erl
+++ b/lib/ssl/test/ssl_ECC_SUITE.erl
@@ -395,10 +395,25 @@ client_ecdhe_rsa_server_ecdhe_ecdsa_client_custom(Config) ->
end.
mix_sign(Config) ->
- {COpts0, SOpts0} = ssl_test_lib:make_mix_cert(Config),
+ mix_sign_rsa_peer(Config),
+ mix_sign_ecdsa_peer(Config).
+
+mix_sign_ecdsa_peer(Config) ->
+ {COpts0, SOpts0} = ssl_test_lib:make_mix_cert([{mix, peer_ecc} |Config]),
COpts = ssl_test_lib:ssl_options(COpts0, Config),
SOpts = ssl_test_lib:ssl_options(SOpts0, Config),
ECDHE_ECDSA =
ssl:filter_cipher_suites(ssl:cipher_suites(default, 'tlsv1.2'),
[{key_exchange, fun(ecdhe_ecdsa) -> true; (_) -> false end}]),
ssl_test_lib:basic_test(COpts, [{ciphers, ECDHE_ECDSA} | SOpts], Config).
+
+
+mix_sign_rsa_peer(Config) ->
+ {COpts0, SOpts0} = ssl_test_lib:make_mix_cert([{mix, peer_rsa} |Config]),
+ COpts = ssl_test_lib:ssl_options(COpts0, Config),
+ SOpts = ssl_test_lib:ssl_options(SOpts0, Config),
+ ECDHE_RSA =
+ ssl:filter_cipher_suites(ssl:cipher_suites(default, 'tlsv1.2'),
+ [{key_exchange, fun(ecdhe_rsa) -> true; (_) -> false end}]),
+ ssl_test_lib:basic_test(COpts, [{ciphers, ECDHE_RSA} | SOpts], Config).
+
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index 4585ea7306..6f668f0c00 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -244,7 +244,9 @@ error_handling_tests()->
recv_active_once,
recv_error_handling,
call_in_error_state,
- close_in_error_state
+ close_in_error_state,
+ abuse_transport_accept_socket,
+ controlling_process_transport_accept_socket
].
error_handling_tests_tls()->
@@ -4054,7 +4056,51 @@ close_in_error_state(Config) when is_list(Config) ->
Other ->
ct:fail(Other)
end.
+%%--------------------------------------------------------------------
+abuse_transport_accept_socket() ->
+ [{doc,"Only ssl:handshake and ssl:controlling_process are allowed for transport_accept sockets"}].
+abuse_transport_accept_socket(Config) when is_list(Config) ->
+ ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
+ ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server_transport_abuse_socket([{node, ServerNode},
+ {port, 0},
+ {from, self()},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib, no_result, []}},
+ {options, ClientOpts}]),
+ ssl_test_lib:check_result(Server, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
+controlling_process_transport_accept_socket() ->
+ [{doc,"Only ssl:handshake and ssl:controlling_process are allowed for transport_accept sockets"}].
+controlling_process_transport_accept_socket(Config) when is_list(Config) ->
+ ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
+ ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server_transport_control([{node, ServerNode},
+ {port, 0},
+ {from, self()},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ _Client = ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {options, ClientOpts}]),
+ ssl_test_lib:check_result(Server, ok),
+ ssl_test_lib:close(Server).
+
+%%--------------------------------------------------------------------
run_error_server_close([Pid | Opts]) ->
{ok, Listen} = ssl:listen(0, Opts),
{ok,{_, Port}} = ssl:sockname(Listen),
diff --git a/lib/ssl/test/ssl_bench_SUITE.erl b/lib/ssl/test/ssl_bench_SUITE.erl
index 3fe6338d69..13097b08b6 100644
--- a/lib/ssl/test/ssl_bench_SUITE.erl
+++ b/lib/ssl/test/ssl_bench_SUITE.erl
@@ -44,6 +44,7 @@ init_per_suite(Config) ->
nonode@nohost ->
{skipped, "Node not distributed"};
_ ->
+ ssl_test_lib:clean_start(),
[{server_node, ssl_bench_test_lib:setup(perf_server)}|Config]
end.
diff --git a/lib/ssl/test/ssl_bench_test_lib.erl b/lib/ssl/test/ssl_bench_test_lib.erl
index e5cbb911bd..47bcd41608 100644
--- a/lib/ssl/test/ssl_bench_test_lib.erl
+++ b/lib/ssl/test/ssl_bench_test_lib.erl
@@ -58,13 +58,13 @@ setup(Name) ->
Path = code:get_path(),
true = rpc:call(Node, code, set_path, [Path]),
ok = rpc:call(Node, ?MODULE, setup_server, [node()]),
- io:format("Client (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Client (~p) using ~ts~n",[node(), code:which(ssl)]),
(Node =:= node()) andalso restrict_schedulers(client),
Node.
setup_server(ClientNode) ->
(ClientNode =:= node()) andalso restrict_schedulers(server),
- io:format("Server (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Server (~p) using ~ts~n",[node(), code:which(ssl)]),
ok.
restrict_schedulers(Type) ->
diff --git a/lib/ssl/test/ssl_engine_SUITE.erl b/lib/ssl/test/ssl_engine_SUITE.erl
index 1423c99dc2..e6c82d3eb5 100644
--- a/lib/ssl/test/ssl_engine_SUITE.erl
+++ b/lib/ssl/test/ssl_engine_SUITE.erl
@@ -90,12 +90,14 @@ end_per_testcase(_TestCase, Config) ->
private_key(Config) when is_list(Config) ->
ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "client_engine"]),
ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "server_engine"]),
+ Ext = x509_test:extensions([{key_usage, [digitalSignature, keyEncipherment]}]),
#{server_config := ServerConf,
client_config := ClientConf} = GenCertData =
public_key:pkix_test_data(#{server_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(1)}],
intermediates => [[{key, ssl_test_lib:hardcode_rsa_key(2)}]],
- peer => [{key, ssl_test_lib:hardcode_rsa_key(3)}
+ peer => [{extensions, Ext},
+ {key, ssl_test_lib:hardcode_rsa_key(3)}
]},
client_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(4)}],
@@ -131,6 +133,12 @@ private_key(Config) when is_list(Config) ->
%% Test with engine
test_tls_connection(EngineServerConf, EngineClientConf, Config),
+ %% Test with engine and rsa keyexchange
+ RSASuites = all_kex_rsa_suites([{tls_version, 'tlsv1.2'} | Config]),
+
+ test_tls_connection([{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineServerConf],
+ [{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineClientConf], Config),
+
%% Test with engine and present file arugments
test_tls_connection(EngineFileServerConf, EngineFileClientConf, Config),
@@ -160,3 +168,8 @@ test_tls_connection(ServerConf, ClientConf, Config) ->
ssl_test_lib:check_result(Server, ok, Client, ok),
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
+
+all_kex_rsa_suites(Config) ->
+ Version = proplists:get_value(tls_version, Config),
+ All = ssl:cipher_suites(all, Version),
+ ssl:filter_cipher_suites(All,[{key_exchange, fun(rsa) -> true;(_) -> false end}]).
diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl
index 39a5bcaad6..8a2f0824fb 100644
--- a/lib/ssl/test/ssl_test_lib.erl
+++ b/lib/ssl/test/ssl_test_lib.erl
@@ -196,6 +196,55 @@ connect(ListenSocket, Node, _, _, Timeout, Opts, _) ->
rpc:call(Node, ssl, ssl_accept, [AcceptSocket, Opts, Timeout]),
AcceptSocket.
+
+start_server_transport_abuse_socket(Args) ->
+ Result = spawn_link(?MODULE, transport_accept_abuse, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
+
+start_server_transport_control(Args) ->
+ Result = spawn_link(?MODULE, transport_switch_control, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
+
+
+transport_accept_abuse(Opts) ->
+ Node = proplists:get_value(node, Opts),
+ Port = proplists:get_value(port, Opts),
+ Options = proplists:get_value(options, Opts),
+ Pid = proplists:get_value(from, Opts),
+ Transport = proplists:get_value(transport, Opts, ssl),
+ ct:log("~p:~p~nssl:listen(~p, ~p)~n", [?MODULE,?LINE, Port, Options]),
+ {ok, ListenSocket} = rpc:call(Node, Transport, listen, [Port, Options]),
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
+ {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
+ [ListenSocket]),
+ {error, _} = rpc:call(Node, ssl, connection_information, [AcceptSocket]),
+ _ = rpc:call(Node, ssl, handshake, [AcceptSocket, infinity]),
+ Pid ! {self(), ok}.
+
+
+transport_switch_control(Opts) ->
+ Node = proplists:get_value(node, Opts),
+ Port = proplists:get_value(port, Opts),
+ Options = proplists:get_value(options, Opts),
+ Pid = proplists:get_value(from, Opts),
+ Transport = proplists:get_value(transport, Opts, ssl),
+ ct:log("~p:~p~nssl:listen(~p, ~p)~n", [?MODULE,?LINE, Port, Options]),
+ {ok, ListenSocket} = rpc:call(Node, Transport, listen, [Port, Options]),
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
+ {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
+ [ListenSocket]),
+ ok = rpc:call(Node, ssl, controlling_process, [AcceptSocket, self()]),
+ Pid ! {self(), ok}.
+
+
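The two helpers above deliberately misuse a socket returned by ssl:transport_accept/1 to provoke errors. For contrast, a hedged sketch of the intended flow on such a socket, where only ssl:controlling_process/2 and ssl:handshake/1,2 are legitimate until the handshake completes (the helper name and option values are made up):

    accept_one(CertFile, KeyFile) ->
        {ok, Listen} = ssl:listen(0, [{certfile, CertFile}, {keyfile, KeyFile}]),
        {ok, TransportSocket} = ssl:transport_accept(Listen),
        %% Before the handshake most calls are rejected, as the test exercises:
        {error, _} = ssl:connection_information(TransportSocket),
        %% Only controlling_process/2 and handshake/1,2 are allowed here:
        ok = ssl:controlling_process(TransportSocket, self()),
        {ok, TlsSocket} = ssl:handshake(TransportSocket, infinity),
        {ok, _Info} = ssl:connection_information(TlsSocket),
        TlsSocket.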
remove_close_msg(0) ->
ok;
remove_close_msg(ReconnectTimes) ->
@@ -693,20 +742,12 @@ make_mix_cert(Config) ->
Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Digest = {digest, appropriate_sha(crypto:supports())},
CurveOid = hd(tls_v1:ecc_curves(0)),
- ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix"]),
- ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix"]),
- ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
- [Digest, {key, hardcode_rsa_key(1)}],
- [Digest, {key, {namedCurve, CurveOid}}, {extensions, Ext}]
- ],
- ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
- [Digest, {key, hardcode_rsa_key(2)}],
- [Digest, {key, {namedCurve, CurveOid}},{extensions, Ext}]
- ],
+ Mix = proplists:get_value(mix, Config, peer_ecc),
ClientChainType =ServerChainType = mix,
+ {ClientChain, ServerChain} = mix(Mix, Digest, CurveOid, Ext),
CertChainConf = gen_conf(ClientChainType, ServerChainType, ClientChain, ServerChain),
- ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), atom_to_list(ClientChainType)]),
- ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), atom_to_list(ServerChainType)]),
+ ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix" ++ atom_to_list(Mix)]),
+ ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix" ++ atom_to_list(Mix)]),
GenCertData = public_key:pkix_test_data(CertChainConf),
[{server_config, ServerConf},
{client_config, ClientConf}] =
@@ -715,6 +756,28 @@ make_mix_cert(Config) ->
[{reuseaddr, true}, {verify, verify_peer} | ServerConf]
}.
+mix(peer_ecc, Digest, CurveOid, Ext) ->
+ ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(1)}],
+ [Digest, {key, {namedCurve, CurveOid}}, {extensions, Ext}]
+ ],
+ ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(2)}],
+ [Digest, {key, {namedCurve, CurveOid}},{extensions, Ext}]
+ ],
+ {ClientChain, ServerChain};
+
+mix(peer_rsa, Digest, CurveOid, Ext) ->
+ ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(1)}, {extensions, Ext}]
+ ],
+ ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(2)},{extensions, Ext}]
+ ],
+ {ClientChain, ServerChain}.
+
make_ecdsa_cert(Config) ->
CryptoSupport = crypto:supports(),
case proplists:get_bool(ecdsa, proplists:get_value(public_keys, CryptoSupport)) of
diff --git a/lib/stdlib/doc/src/beam_lib.xml b/lib/stdlib/doc/src/beam_lib.xml
index 26d0724aaf..213170df7f 100644
--- a/lib/stdlib/doc/src/beam_lib.xml
+++ b/lib/stdlib/doc/src/beam_lib.xml
@@ -180,8 +180,8 @@ io:fwrite("~s~n", [erl_prettypr:format(erl_syntax:form_list(AC))]).</code>
<name name="beam"/>
<desc>
<p>Each of the functions described below accept either the
- module name, the filename, or a binary containing the BEAM
- module.</p>
+ filename (as a string) or a binary containing the BEAM
+ module.</p>
</desc>
</datatype>
<datatype>
diff --git a/lib/stdlib/doc/src/gen_event.xml b/lib/stdlib/doc/src/gen_event.xml
index f793ec7fdf..fc34e51216 100644
--- a/lib/stdlib/doc/src/gen_event.xml
+++ b/lib/stdlib/doc/src/gen_event.xml
@@ -775,7 +775,7 @@ gen_event:stop -----> Module:terminate/2
<p>This callback is optional, so callback modules need not
export it. The <c>gen_event</c> module provides a default
implementation of this function that logs about the unexpected
- <c>Info</c> message, drops it and returns <c>{noreply, State}</c>.</p>
+ <c>Info</c> message, drops it and returns <c>{ok, State}</c>.</p>
</note>
<p>This function is called for each installed event handler when
an event manager receives any other message than an event or
diff --git a/lib/stdlib/doc/src/lists.xml b/lib/stdlib/doc/src/lists.xml
index c3d5d7e07a..e4215a5336 100644
--- a/lib/stdlib/doc/src/lists.xml
+++ b/lib/stdlib/doc/src/lists.xml
@@ -850,14 +850,6 @@ splitwith(Pred, List) ->
> <input>lists:subtract("123212", "212").</input>
"312".</pre>
<p><c>lists:subtract(A, B)</c> is equivalent to <c>A -- B</c>.</p>
- <warning>
- <p>The complexity of <c>lists:subtract(A, B)</c> is proportional to
- <c>length(A)*length(B)</c>, meaning that it is very slow if both
- <c>A</c> and <c>B</c> are long lists. (If both lists are long, it
- is a much better choice to use ordered lists and
- <seealso marker="ordsets#subtract/2">
- <c>ordsets:subtract/2</c></seealso>.</p>
- </warning>
</desc>
</func>
diff --git a/lib/stdlib/doc/src/notes.xml b/lib/stdlib/doc/src/notes.xml
index d800885b16..039f087708 100644
--- a/lib/stdlib/doc/src/notes.xml
+++ b/lib/stdlib/doc/src/notes.xml
@@ -504,6 +504,21 @@
</section>
+<section><title>STDLIB 3.4.5.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (The <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 3.4.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -1658,6 +1673,21 @@
</section>
+<section><title>STDLIB 2.8.0.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (The <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 2.8</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -7827,4 +7857,3 @@
</section>
</section>
</chapter>
-
diff --git a/lib/stdlib/src/beam_lib.erl b/lib/stdlib/src/beam_lib.erl
index 01181b1097..3386cfcbe6 100644
--- a/lib/stdlib/src/beam_lib.erl
+++ b/lib/stdlib/src/beam_lib.erl
@@ -53,7 +53,7 @@
%%-------------------------------------------------------------------------
--type beam() :: module() | file:filename() | binary().
+-type beam() :: file:filename() | binary().
-type debug_info() :: {DbgiVersion :: atom(), Backend :: module(), Data :: term()} | 'no_debug_info'.
-type forms() :: [erl_parse:abstract_form() | erl_parse:form_info()].
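With the narrowed beam() type above, callers pass either a filename or a binary; a bare module name is no longer part of the contract. A hedged usage sketch (path and module name are made up):

    %% By filename:
    {ok, {mymod, [{exports, Exports}]}} =
        beam_lib:chunks("ebin/mymod.beam", [exports]),
    %% ...or by binary:
    {ok, Bin} = file:read_file("ebin/mymod.beam"),
    {ok, {mymod, [{exports, Exports}]}} = beam_lib:chunks(Bin, [exports]).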
diff --git a/lib/stdlib/test/gen_fsm_SUITE.erl b/lib/stdlib/test/gen_fsm_SUITE.erl
index 41ee3246f5..a8264e5a84 100644
--- a/lib/stdlib/test/gen_fsm_SUITE.erl
+++ b/lib/stdlib/test/gen_fsm_SUITE.erl
@@ -124,8 +124,10 @@ start2(Config) when is_list(Config) ->
{ok, Pid0} = gen_fsm:start(gen_fsm_SUITE, [], []),
ok = do_func_test(Pid0),
ok = do_sync_func_test(Pid0),
+ MRef = monitor(process,Pid0),
shutdown_stopped =
gen_fsm:sync_send_all_state_event(Pid0, stop_shutdown),
+ receive {'DOWN',MRef,_,_,shutdown} -> ok end,
{'EXIT', {noproc,_}} =
(catch gen_fsm:sync_send_event(Pid0, hej)),
diff --git a/lib/stdlib/test/lists_SUITE.erl b/lib/stdlib/test/lists_SUITE.erl
index 837ab4e97e..984b51e7ae 100644
--- a/lib/stdlib/test/lists_SUITE.erl
+++ b/lib/stdlib/test/lists_SUITE.erl
@@ -2597,6 +2597,20 @@ subtract(Config) when is_list(Config) ->
{'EXIT',_} = (catch sub([a|b], [])),
{'EXIT',_} = (catch sub([a|b], [a])),
+ %% Trapping, both crashing and otherwise.
+ [sub_trapping(N) || N <- lists:seq(0, 18)],
+
+ %% The current implementation chooses which algorithm to use based on
+ %% certain thresholds, and we need proper coverage for all corner cases.
+ [sub_thresholds(N) || N <- lists:seq(0, 32)],
+
+ %% Trapping, both crashing and otherwise.
+ [sub_trapping(N) || N <- lists:seq(0, 18)],
+
+ %% The current implementation chooses which algorithm to use based on
+ %% certain thresholds, and we need proper coverage for all corner cases.
+ [sub_thresholds(N) || N <- lists:seq(0, 32)],
+
ok.
sub_non_matching(A, B) ->
@@ -2606,6 +2620,41 @@ sub(A, B) ->
Res = A -- B,
Res = lists:subtract(A, B).
+sub_trapping(N) ->
+ List = lists:duplicate(N + (1 bsl N), gurka),
+ ImproperList = List ++ crash,
+
+ {'EXIT',_} = (catch sub_trapping_1(ImproperList, [])),
+ {'EXIT',_} = (catch sub_trapping_1(List, ImproperList)),
+
+ List = List -- lists:duplicate(N + (1 bsl N), gaffel),
+ ok = sub_trapping_1(List, []).
+
+sub_trapping_1([], _) -> ok;
+sub_trapping_1(L, R) -> sub_trapping_1(L -- R, [gurka | R]).
+
+sub_thresholds(N) ->
+ %% This needs to be long enough to cause trapping.
+ OtherLen = 1 bsl 18,
+ Other = lists:seq(0, OtherLen - 1),
+
+ Disjoint = lists:seq(-N, -1),
+ Subset = lists:seq(1, N),
+
+ %% LHS is disjoint from RHS, so all elements must be retained.
+ Disjoint = Disjoint -- Other,
+
+ %% LHS is covered by RHS, so all elements must be removed.
+ [] = Subset -- Other,
+
+ %% RHS is disjoint from LHS, so all elements must be retained.
+ Other = Other -- Disjoint,
+
+ %% RHS is covered by LHS, so N elements must be removed.
+ N = OtherLen - length(Other -- Subset),
+
+ ok.
+
%% Test lists:droplast/1
droplast(Config) when is_list(Config) ->
[] = lists:droplast([x]),
diff --git a/lib/stdlib/test/sys_SUITE.erl b/lib/stdlib/test/sys_SUITE.erl
index 3278eb0eb0..fcc4419569 100644
--- a/lib/stdlib/test/sys_SUITE.erl
+++ b/lib/stdlib/test/sys_SUITE.erl
@@ -219,7 +219,7 @@ spec_proc(Mod) ->
{Mod,system_get_state},{throw,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal),
+ ok = sync_terminate(Mod),
{ok,_} = Mod:start_link(4),
ok = case catch sys:replace_state(Mod, fun(_) -> {} end) of
{} ->
@@ -228,7 +228,7 @@ spec_proc(Mod) ->
{Mod,system_replace_state},{throw,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal),
+ ok = sync_terminate(Mod),
{ok,_} = Mod:start_link(4),
StateFun = fun(_) -> error(fail) end,
ok = case catch sys:replace_state(Mod, StateFun) of
@@ -240,7 +240,18 @@ spec_proc(Mod) ->
{'EXIT',{{callback_failed,StateFun,{error,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal).
+ ok = sync_terminate(Mod).
+
+sync_terminate(Mod) ->
+ P = whereis(Mod),
+ MRef = erlang:monitor(process,P),
+ ok = sys:terminate(Mod, normal),
+ receive
+ {'DOWN',MRef,_,_,normal} ->
+ ok
+ end,
+ undefined = whereis(Mod),
+ ok.
%%%%%%%%%%%%%%%%%%%%
%% Dummy server
diff --git a/make/fixup_development_runtime_dependencies b/make/fixup_development_runtime_dependencies
new file mode 100755
index 0000000000..e06bd5faca
--- /dev/null
+++ b/make/fixup_development_runtime_dependencies
@@ -0,0 +1,111 @@
+#!/usr/bin/env perl
+
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2018. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Replaces runtime_dependencies pointing to future not yet
+# resolved versions in the maint and master branches while under
+# development. Such dependencies may exist in .app files on the
+# form (<app name>-@<ticket>(:<ticket>)*@) and will be replaced
+# with the current version of the application in the source tree.
+# This is in order not to break tests looking at runtime_dependencies.
+#
+
+use strict;
+use File::Basename;
+
+my $usage_text = <<"HERE";
+ usage: $0 <ERL_TOP>
+HERE
+
+my %app_vsn;
+my $exit_status = 0;
+
+@ARGV == 1 or die $usage_text;
+my $erl_top = shift @ARGV;
+
+chdir $erl_top or die "Failed to change directory into '$erl_top'";
+
+print "Fixup of development runtime dependencies\n";
+
+#
+# Determine versions of all applications in the source tree...
+#
+foreach my $vsn_mk (<lib/*/vsn.mk>, <erts/vsn.mk>) {
+ my $app_dir = dirname($vsn_mk);
+ my $app = basename($app_dir);
+
+ if (!open(VSN, $vsn_mk)) {
+ $exit_status = 1;
+ print STDERR "ERROR: Failed to open '$vsn_mk' for reading: $!\n";
+ }
+ else {
+ my $vsn = '';
+ while (<VSN>) {
+ if (/VSN\s*=\s*(\S+)/) {
+ $vsn = $1;
+ last;
+ }
+ }
+ close VSN;
+ if (!$vsn) {
+ $exit_status = 1;
+ print STDERR "ERROR: No version found in '$vsn_mk'\n"
+ }
+ else {
+ $app_vsn{$app} = "$app-$vsn";
+ }
+ }
+}
+
+my $valid_apps = join('|', keys %app_vsn);
+
+#
+# Replace all <app name>-@<ticket>(:<ticket>)*@ versions
+# in all *.app files with the versions currently used...
+#
+foreach my $app_file (<lib/*/ebin/*.app>, <erts/preloaded/ebin/erts.app>) {
+ if (!open(IN, "<", $app_file)) {
+ $exit_status = 1;
+ print STDERR "ERROR: Failed to open '$app_file' for reading: $!";
+ }
+ else {
+ local $/;
+ my $file = <IN>;
+ close IN;
+ my $old_file = $file;
+
+ $file =~ s/($valid_apps)-\@OTP-\d{4,5}(?::OTP-\d{4,5})*\@/$app_vsn{$1}/g;
+
+ if ($file ne $old_file) {
+ if (!open(OUT, ">", $app_file)) {
+ $exit_status = 1;
+ print STDERR "ERROR: Failed to open '$app_file' for writing: $!";
+ }
+ else {
+ print OUT $file;
+ close OUT;
+ }
+ }
+ }
+}
+
+exit $exit_status;
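The script rewrites placeholder dependency versions of the form <app name>-@<ticket>(:<ticket>)*@ into the concrete versions read from each vsn.mk. A hedged before/after illustration of a single .app attribute (the ticket numbers, file, and versions are made up):

    %% Before, in lib/ssl/ebin/ssl.app on a development branch:
    {runtime_dependencies, ["stdlib-@OTP-15371@", "kernel-6.0", "erts-@OTP-15371:OTP-15372@"]}
    %% After running the script, assuming VSN = 3.7 in lib/stdlib/vsn.mk
    %% and VSN = 10.1 in erts/vsn.mk:
    {runtime_dependencies, ["stdlib-3.7", "kernel-6.0", "erts-10.1"]}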
diff --git a/make/otp_patch_solve_forward_merge_version b/make/otp_patch_solve_forward_merge_version
new file mode 100644
index 0000000000..b8626c4cff
--- /dev/null
+++ b/make/otp_patch_solve_forward_merge_version
@@ -0,0 +1 @@
+4
diff --git a/make/otp_version_tickets b/make/otp_version_tickets
index e69de29bb2..b8220e1a87 100644
--- a/make/otp_version_tickets
+++ b/make/otp_version_tickets
@@ -0,0 +1 @@
+DEVELOPMENT
diff --git a/otp_versions.table b/otp_versions.table
index c6544dfc90..d77f2cfb9d 100644
--- a/otp_versions.table
+++ b/otp_versions.table
@@ -10,6 +10,11 @@ OTP-21.0.3 : erts-10.0.3 # asn1-5.0.6 common_test-1.16 compiler-7.2.2 crypto-4.3
OTP-21.0.2 : compiler-7.2.2 erts-10.0.2 public_key-1.6.1 stdlib-3.5.1 # asn1-5.0.6 common_test-1.16 crypto-4.3 debugger-4.2.5 dialyzer-3.3 diameter-2.1.5 edoc-0.9.3 eldap-1.2.4 erl_docgen-0.8 erl_interface-3.10.3 et-1.6.2 eunit-2.3.6 ftp-1.0 hipe-3.18 inets-7.0 jinterface-1.9 kernel-6.0 megaco-3.18.3 mnesia-4.15.4 observer-2.8 odbc-2.12.1 os_mon-2.4.5 otp_mibs-1.2 parsetools-2.1.7 reltool-0.7.6 runtime_tools-1.13 sasl-3.2 snmp-5.2.11 ssh-4.7 ssl-9.0 syntax_tools-2.1.5 tftp-1.0 tools-3.0 wx-1.8.4 xmerl-1.3.17 :
OTP-21.0.1 : compiler-7.2.1 erts-10.0.1 # asn1-5.0.6 common_test-1.16 crypto-4.3 debugger-4.2.5 dialyzer-3.3 diameter-2.1.5 edoc-0.9.3 eldap-1.2.4 erl_docgen-0.8 erl_interface-3.10.3 et-1.6.2 eunit-2.3.6 ftp-1.0 hipe-3.18 inets-7.0 jinterface-1.9 kernel-6.0 megaco-3.18.3 mnesia-4.15.4 observer-2.8 odbc-2.12.1 os_mon-2.4.5 otp_mibs-1.2 parsetools-2.1.7 public_key-1.6 reltool-0.7.6 runtime_tools-1.13 sasl-3.2 snmp-5.2.11 ssh-4.7 ssl-9.0 stdlib-3.5 syntax_tools-2.1.5 tftp-1.0 tools-3.0 wx-1.8.4 xmerl-1.3.17 :
OTP-21.0 : asn1-5.0.6 common_test-1.16 compiler-7.2 crypto-4.3 debugger-4.2.5 dialyzer-3.3 diameter-2.1.5 edoc-0.9.3 eldap-1.2.4 erl_docgen-0.8 erl_interface-3.10.3 erts-10.0 et-1.6.2 eunit-2.3.6 ftp-1.0 hipe-3.18 inets-7.0 jinterface-1.9 kernel-6.0 mnesia-4.15.4 observer-2.8 os_mon-2.4.5 otp_mibs-1.2 parsetools-2.1.7 public_key-1.6 reltool-0.7.6 runtime_tools-1.13 sasl-3.2 ssh-4.7 ssl-9.0 stdlib-3.5 syntax_tools-2.1.5 tftp-1.0 tools-3.0 wx-1.8.4 xmerl-1.3.17 # megaco-3.18.3 odbc-2.12.1 snmp-5.2.11 :
+OTP-20.3.8.14 : ssh-4.6.9.2 # asn1-5.0.5.1 common_test-1.15.4 compiler-7.1.5.2 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3.1 erl_docgen-0.7.3 erl_interface-3.10.2.1 erts-9.3.3.6 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 inets-6.5.2.4 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 mnesia-4.15.3.2 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssl-8.2.6.4 stdlib-3.4.5.1 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
+OTP-20.3.8.13 : ssl-8.2.6.4 # asn1-5.0.5.1 common_test-1.15.4 compiler-7.1.5.2 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3.1 erl_docgen-0.7.3 erl_interface-3.10.2.1 erts-9.3.3.6 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 inets-6.5.2.4 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 mnesia-4.15.3.2 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssh-4.6.9.1 stdlib-3.4.5.1 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
+OTP-20.3.8.12 : erts-9.3.3.6 ssl-8.2.6.3 stdlib-3.4.5.1 # asn1-5.0.5.1 common_test-1.15.4 compiler-7.1.5.2 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3.1 erl_docgen-0.7.3 erl_interface-3.10.2.1 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 inets-6.5.2.4 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 mnesia-4.15.3.2 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssh-4.6.9.1 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
+OTP-20.3.8.11 : erts-9.3.3.5 # asn1-5.0.5.1 common_test-1.15.4 compiler-7.1.5.2 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3.1 erl_docgen-0.7.3 erl_interface-3.10.2.1 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 inets-6.5.2.4 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 mnesia-4.15.3.2 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssh-4.6.9.1 ssl-8.2.6.2 stdlib-3.4.5 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
+OTP-20.3.8.10 : eldap-1.2.3.1 erts-9.3.3.4 # asn1-5.0.5.1 common_test-1.15.4 compiler-7.1.5.2 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 erl_docgen-0.7.3 erl_interface-3.10.2.1 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 inets-6.5.2.4 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 mnesia-4.15.3.2 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssh-4.6.9.1 ssl-8.2.6.2 stdlib-3.4.5 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
OTP-20.3.8.9 : compiler-7.1.5.2 # asn1-5.0.5.1 common_test-1.15.4 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.3 erl_interface-3.10.2.1 erts-9.3.3.3 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 inets-6.5.2.4 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 mnesia-4.15.3.2 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssh-4.6.9.1 ssl-8.2.6.2 stdlib-3.4.5 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
OTP-20.3.8.8 : inets-6.5.2.4 # asn1-5.0.5.1 common_test-1.15.4 compiler-7.1.5.1 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.3 erl_interface-3.10.2.1 erts-9.3.3.3 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 mnesia-4.15.3.2 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssh-4.6.9.1 ssl-8.2.6.2 stdlib-3.4.5 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
OTP-20.3.8.7 : crypto-4.2.2.2 mnesia-4.15.3.2 # asn1-5.0.5.1 common_test-1.15.4 compiler-7.1.5.1 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.3 erl_interface-3.10.2.1 erts-9.3.3.3 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4.2 inets-6.5.2.3 jinterface-1.8.1 kernel-5.4.3.2 megaco-3.18.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.11 ssh-4.6.9.1 ssl-8.2.6.2 stdlib-3.4.5 syntax_tools-2.1.4.1 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
@@ -48,6 +53,7 @@ OTP-20.0.3 : asn1-5.0.2 compiler-7.1.1 erts-9.0.3 ssh-4.5.1 # common_test-1.15.1
OTP-20.0.2 : asn1-5.0.1 erts-9.0.2 kernel-5.3.1 # common_test-1.15.1 compiler-7.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 eldap-1.2.2 erl_docgen-0.7 erl_interface-3.10 et-1.6 eunit-2.3.3 hipe-3.16 ic-4.4.2 inets-6.4 jinterface-1.8 megaco-3.18.2 mnesia-4.15 observer-2.4 odbc-2.12 orber-3.8.3 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 runtime_tools-1.12.1 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 stdlib-3.4.1 syntax_tools-2.1.2 tools-2.10.1 wx-1.8.1 xmerl-1.3.15 :
OTP-20.0.1 : common_test-1.15.1 erts-9.0.1 runtime_tools-1.12.1 stdlib-3.4.1 tools-2.10.1 # asn1-5.0 compiler-7.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 eldap-1.2.2 erl_docgen-0.7 erl_interface-3.10 et-1.6 eunit-2.3.3 hipe-3.16 ic-4.4.2 inets-6.4 jinterface-1.8 kernel-5.3 megaco-3.18.2 mnesia-4.15 observer-2.4 odbc-2.12 orber-3.8.3 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 syntax_tools-2.1.2 wx-1.8.1 xmerl-1.3.15 :
OTP-20.0 : asn1-5.0 common_test-1.15 compiler-7.1 cosProperty-1.2.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 erl_docgen-0.7 erl_interface-3.10 erts-9.0 eunit-2.3.3 hipe-3.16 inets-6.4 jinterface-1.8 kernel-5.3 megaco-3.18.2 mnesia-4.15 observer-2.4 orber-3.8.3 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 runtime_tools-1.12 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 stdlib-3.4 syntax_tools-2.1.2 tools-2.10 wx-1.8.1 xmerl-1.3.15 # cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 eldap-1.2.2 et-1.6 ic-4.4.2 odbc-2.12 os_mon-2.4.2 otp_mibs-1.1.1 :
+OTP-19.3.6.12 : eldap-1.2.2.1 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.6 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssh-4.4.2.4 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
OTP-19.3.6.11 : erts-8.3.5.6 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssh-4.4.2.4 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
OTP-19.3.6.10 : erts-8.3.5.5 syntax_tools-2.1.1.1 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssh-4.4.2.4 ssl-8.1.3.1.1 stdlib-3.3 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
OTP-19.3.6.9 : ssh-4.4.2.4 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
@@ -87,6 +93,8 @@ OTP-19.0.3 : inets-6.3.2 kernel-5.0.1 ssl-8.0.1 # asn1-4.0.3 common_test-1.12.2
OTP-19.0.2 : compiler-7.0.1 erts-8.0.2 stdlib-3.0.1 # asn1-4.0.3 common_test-1.12.2 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7 debugger-4.2 dialyzer-3.0.1 diameter-1.12 edoc-0.7.19 eldap-1.2.2 erl_docgen-0.5 erl_interface-3.9 et-1.6 eunit-2.3 gs-1.6.1 hipe-3.15.1 ic-4.4.1 inets-6.3.1 jinterface-1.7 kernel-5.0 megaco-3.18.1 mnesia-4.14 observer-2.2.1 odbc-2.11.2 orber-3.8.2 os_mon-2.4.1 otp_mibs-1.1.1 parsetools-2.1.2 percept-0.9 public_key-1.2 reltool-0.7.1 runtime_tools-1.10 sasl-3.0 snmp-5.2.3 ssh-4.3.1 ssl-8.0 syntax_tools-2.0 tools-2.8.5 typer-0.9.11 wx-1.7 xmerl-1.3.11 :
OTP-19.0.1 : dialyzer-3.0.1 erts-8.0.1 inets-6.3.1 observer-2.2.1 ssh-4.3.1 tools-2.8.5 # asn1-4.0.3 common_test-1.12.2 compiler-7.0 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7 debugger-4.2 diameter-1.12 edoc-0.7.19 eldap-1.2.2 erl_docgen-0.5 erl_interface-3.9 et-1.6 eunit-2.3 gs-1.6.1 hipe-3.15.1 ic-4.4.1 jinterface-1.7 kernel-5.0 megaco-3.18.1 mnesia-4.14 odbc-2.11.2 orber-3.8.2 os_mon-2.4.1 otp_mibs-1.1.1 parsetools-2.1.2 percept-0.9 public_key-1.2 reltool-0.7.1 runtime_tools-1.10 sasl-3.0 snmp-5.2.3 ssl-8.0 stdlib-3.0 syntax_tools-2.0 typer-0.9.11 wx-1.7 xmerl-1.3.11 :
OTP-19.0 : asn1-4.0.3 common_test-1.12.2 compiler-7.0 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7 debugger-4.2 dialyzer-3.0 diameter-1.12 edoc-0.7.19 eldap-1.2.2 erl_docgen-0.5 erl_interface-3.9 erts-8.0 et-1.6 eunit-2.3 gs-1.6.1 hipe-3.15.1 ic-4.4.1 inets-6.3 jinterface-1.7 kernel-5.0 megaco-3.18.1 mnesia-4.14 observer-2.2 odbc-2.11.2 orber-3.8.2 os_mon-2.4.1 otp_mibs-1.1.1 parsetools-2.1.2 percept-0.9 public_key-1.2 reltool-0.7.1 runtime_tools-1.10 sasl-3.0 snmp-5.2.3 ssh-4.3 ssl-8.0 stdlib-3.0 syntax_tools-2.0 tools-2.8.4 typer-0.9.11 wx-1.7 xmerl-1.3.11 # :
+OTP-18.3.4.11 : erts-7.3.1.6 stdlib-2.8.0.1 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3.1 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3.1 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1.1 erl_docgen-0.4.2 erl_interface-3.8.2 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4.1 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssh-4.2.2.6 ssl-7.3.3.2 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
+OTP-18.3.4.10 : erts-7.3.1.5 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3.1 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3.1 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1.1 erl_docgen-0.4.2 erl_interface-3.8.2 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4.1 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssh-4.2.2.6 ssl-7.3.3.2 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
OTP-18.3.4.9 : ssh-4.2.2.6 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3.1 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3.1 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1.1 erl_docgen-0.4.2 erl_interface-3.8.2 erts-7.3.1.4 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4.1 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssl-7.3.3.2 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
OTP-18.3.4.8 : ssh-4.2.2.5 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3.1 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3.1 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1.1 erl_docgen-0.4.2 erl_interface-3.8.2 erts-7.3.1.4 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4.1 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssl-7.3.3.2 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
OTP-18.3.4.7 : ssl-7.3.3.2 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3.1 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3.1 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1.1 erl_docgen-0.4.2 erl_interface-3.8.2 erts-7.3.1.4 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4.1 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssh-4.2.2.4 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
diff --git a/system/doc/efficiency_guide/commoncaveats.xml b/system/doc/efficiency_guide/commoncaveats.xml
index b41ffc3902..367da09ba3 100644
--- a/system/doc/efficiency_guide/commoncaveats.xml
+++ b/system/doc/efficiency_guide/commoncaveats.xml
@@ -169,53 +169,5 @@ multiple_setelement(T0) ->
{Bin1,Bin2} = split_binary(Bin, Num)</code>
</section>
- <section>
- <title>Operator "--"</title>
- <p>The "<c>--</c>" operator has a complexity
- proportional to the product of the length of its operands.
- This means that the operator is very slow if both of its operands
- are long lists:</p>
-
- <p><em>DO NOT</em></p>
- <code type="none"><![CDATA[
- HugeList1 -- HugeList2]]></code>
-
- <p>Instead use the <seealso marker="stdlib:ordsets">ordsets</seealso>
- module in STDLIB:</p>
-
- <p><em>DO</em></p>
- <code type="none">
- HugeSet1 = ordsets:from_list(HugeList1),
- HugeSet2 = ordsets:from_list(HugeList2),
- ordsets:subtract(HugeSet1, HugeSet2)</code>
-
- <p>Obviously, that code does not work if the original order
- of the list is important. If the order of the list must be
- preserved, do as follows:</p>
-
- <p><em>DO</em></p>
- <code type="none"><![CDATA[
- Set = gb_sets:from_list(HugeList2),
- [E || E <- HugeList1, not gb_sets:is_element(E, Set)]]]></code>
-
- <note><p>This code behaves differently from "<c>--</c>"
- if the lists contain duplicate elements (one occurrence
- of an element in HugeList2 removes <em>all</em>
- occurrences in HugeList1.)</p>
- <p>Also, this code compares lists elements using the
- "<c>==</c>" operator, while "<c>--</c>" uses the "<c>=:=</c>" operator.
- If that difference is important, <c>sets</c> can be used instead of
- <c>gb_sets</c>, but <c>sets:from_list/1</c> is much
- slower than <c>gb_sets:from_list/1</c> for long lists.</p></note>
-
- <p>Using the "<c>--</c>" operator to delete an element
- from a list is not a performance problem:</p>
-
- <p><em>OK</em></p>
- <code type="none">
- HugeList1 -- [Element]</code>
-
- </section>
-
</chapter>
diff --git a/system/doc/efficiency_guide/profiling.xml b/system/doc/efficiency_guide/profiling.xml
index cdc80289cf..5ec1f1be6e 100644
--- a/system/doc/efficiency_guide/profiling.xml
+++ b/system/doc/efficiency_guide/profiling.xml
@@ -94,7 +94,7 @@
<p>The above slogan is one of the more common reasons for Erlang to terminate.
For unknown reasons the Erlang Run-Time System failed to allocate memory to
use. When this happens a crash dump is generated that contains information
- about the state of the system as it ran out of mmeory. Use the
+ about the state of the system as it ran out of memory. Use the
<seealso marker="observer:cdv"><c>crashdump_viewer</c></seealso> to get a
view of the memory is being used. Look for processes with large heaps or
many messages, large ets tables, etc.</p>
diff --git a/system/doc/efficiency_guide/retired_myths.xml b/system/doc/efficiency_guide/retired_myths.xml
index 9b914a3b6e..144c942c2b 100644
--- a/system/doc/efficiency_guide/retired_myths.xml
+++ b/system/doc/efficiency_guide/retired_myths.xml
@@ -60,4 +60,18 @@
That leads us to the myth that tail-recursive functions are faster
than body-recursive functions.</p>
</section>
+
+ <section>
+ <title>Myth: List subtraction ("--" operator) is slow</title>
+
+ <p>List subtraction used to have a run-time complexity proportional to the
+ product of the length of its operands, so it was extremely slow when both
+ lists were long.</p>
+
+ <p>As of OTP 22 the run-time complexity is "n log n" and the operation will
+ complete quickly even when both lists are very long. In fact, it is
+ faster and uses less memory than the commonly used workaround to convert
+ both lists to ordered sets before subtracting them with
+ <c>ordsets:subtract/2</c>.</p>
+ </section>
</chapter>
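A hedged illustration of the retired workaround versus the direct operator (function names are made up): both remove the elements of Small from Big, but the ordsets variant deduplicates and reorders its result, while "--" removes one occurrence per element and preserves the original order, and as of OTP 22 it no longer carries a quadratic cost.

    subtract_direct(Big, Small) ->
        Big -- Small.

    subtract_via_ordsets(Big, Small) ->
        ordsets:subtract(ordsets:from_list(Big), ordsets:from_list(Small)).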