-rw-r--r--  Makefile.in | 2
-rw-r--r--  configure.src | 2
-rw-r--r--  erts/autoconf/vxworks/sed.general | 1
-rw-r--r--  erts/configure.in | 23
-rw-r--r--  erts/doc/src/notes.xml | 40
-rw-r--r--  erts/emulator/Makefile.in | 15
-rw-r--r--  erts/emulator/beam/erl_message.c | 7
-rw-r--r--  erts/emulator/beam/erl_message.h | 6
-rw-r--r--  erts/emulator/beam/erl_nif.c | 41
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 113
-rw-r--r--  erts/lib_src/Makefile.in | 7
-rw-r--r--  erts/vsn.mk | 2
-rw-r--r--  lib/Makefile | 4
-rw-r--r--  lib/compiler/src/beam_validator.erl | 698
-rw-r--r--  lib/compiler/test/beam_validator_SUITE.erl | 29
-rw-r--r--  lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S | 4
-rw-r--r--  lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S | 4
-rw-r--r--  lib/erl_interface/configure.in | 2
-rw-r--r--  lib/erl_interface/src/Makefile | 2
-rw-r--r--  lib/ftp/src/ftp.erl | 287
-rw-r--r--  lib/ftp/test/ftp_SUITE.erl | 67
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml | 10
-rw-r--r--  lib/kernel/doc/src/logger.xml | 2
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 14
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml | 122
-rw-r--r--  lib/kernel/doc/src/notes.xml | 19
-rw-r--r--  lib/kernel/src/kernel.app.src | 2
-rw-r--r--  lib/kernel/src/kernel.appup.src | 8
-rw-r--r--  lib/kernel/src/logger_formatter.erl | 71
-rw-r--r--  lib/kernel/src/logger_h_common.erl | 35
-rw-r--r--  lib/kernel/src/logger_olp.erl | 5
-rw-r--r--  lib/kernel/src/logger_olp.hrl | 7
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 503
-rw-r--r--  lib/kernel/test/kernel.spec | 1
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 14
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 428
-rw-r--r--  lib/kernel/test/logger_stress_SUITE.erl | 94
-rw-r--r--  lib/kernel/vsn.mk | 2
-rw-r--r--  lib/ssh/test/ssh_protocol_SUITE.erl | 6
-rw-r--r--  lib/stdlib/src/io_lib_pretty.erl | 46
-rw-r--r--  lib/stdlib/test/io_SUITE.erl | 39
-rw-r--r--  lib/tools/c_src/Makefile.in | 7
-rw-r--r--  make/otp.mk.in | 1
-rwxr-xr-x  otp_build | 2
-rw-r--r--  otp_versions.table | 1
-rw-r--r--  system/doc/design_principles/statem.xml | 13
-rw-r--r--  system/doc/general_info/deprecations.xml | 21
-rw-r--r--  system/doc/top/Makefile | 16
48 files changed, 2040 insertions(+), 805 deletions(-)

diff --git a/Makefile.in b/Makefile.in
index 30755c2e4c..494ab52b3a 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -769,7 +769,7 @@ tertiary_bootstrap_copy:
true; \
done
# copy erl_interface includes
- $(V_at)for x in lib/erl_interface/include/* lib/erl_interface/include/$(TARGET)/*; do \
+ $(V_at)for x in lib/erl_interface/include/*.h lib/erl_interface/include/$(TARGET)/*.h; do \
BN=`basename $$x`; \
TF=$(BOOTSTRAP_ROOT)/bootstrap/lib/erl_interface/include/$$BN; \
test -f $$TF && \
diff --git a/configure.src b/configure.src
index 889902eb96..4b748f2545 100644
--- a/configure.src
+++ b/configure.src
@@ -70,8 +70,10 @@ while test $# != 0; do
ERL_TOP="$user_srcdir"
;;
--enable-bootstrap-only)
+ config_arguments="$config_arguments --enable-bootstrap-only"
bootstrap_only=yes;;
--disable-bootstrap-only)
+ config_arguments="$config_arguments --disable-bootstrap-only"
bootstrap_only=no;;
--enable-option-checking)
echo "ERROR: Cannot enable option checking" 1>&2
diff --git a/erts/autoconf/vxworks/sed.general b/erts/autoconf/vxworks/sed.general
index 0e99b4dba4..d32fbdc5c0 100644
--- a/erts/autoconf/vxworks/sed.general
+++ b/erts/autoconf/vxworks/sed.general
@@ -103,7 +103,6 @@ s|@INSTALL_PROGRAM@|${INSTALL}|
s|@INSTALL_SCRIPT@|${INSTALL}|
s|@INSTALL_DATA@|${INSTALL} -m 644|
s|@INSTALL_DIR@|$(INSTALL) -d|
-s|@RM@|/bin/rm|
s|@MKDIR@|/bin/mkdir|
s|@ERLANG_OSTYPE@|vxworks|
s|@vxworks_reclaim@|reclaim.h|
diff --git a/erts/configure.in b/erts/configure.in
index ddbb65e704..b070ad0649 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -772,11 +772,6 @@ AC_SUBST(LIBCARBON)
_search_path=/bin:/usr/bin:/usr/local/bin:$PATH
-AC_PATH_PROG(RM, rm, false, $_search_path)
-if test "$ac_cv_path_RM" = false; then
- AC_MSG_ERROR([No 'rm' command found])
-fi
-
AC_PATH_PROG(MKDIR, mkdir, false, $_search_path)
if test "$ac_cv_path_MKDIR" = false; then
AC_MSG_ERROR([No 'mkdir' command found])
@@ -791,9 +786,9 @@ _search_path=
# Remove old configuration information.
-# Next line should be placed after AC_PATH_PROG(RM, ...), but before
-# first output to CONN_INFO. So this is just the right place.
-$RM -f "$ERL_TOP/erts/CONF_INFO"
+# Next line should be before first output to CONN_INFO. So this is
+# just the right place.
+rm -f "$ERL_TOP/erts/CONF_INFO"
dnl Check if we should/can build a sharing-preserving emulator
AC_MSG_CHECKING(if we are building a sharing-preserving emulator)
@@ -831,7 +826,7 @@ fi
## Delete previous failed configure results
if test -f doc/CONF_INFO; then
- $RM doc/CONF_INFO
+ rm -f doc/CONF_INFO
fi
AC_CHECK_PROGS(XSLTPROC, xsltproc)
@@ -3085,14 +3080,14 @@ if test "$enable_dtrace_test" = "yes" ; then
AC_MSG_CHECKING([for 2-stage DTrace precompilation])
AC_TRY_COMPILE([ #include "foo-dtrace.h" ],
[ERLANG_DIST_PORT_BUSY_ENABLED();],
- [$RM -f $DTRACE_2STEP_TEST
+ [rm -f $DTRACE_2STEP_TEST
dtrace -G $DTRACE_CPP $DTRACE_BITS_FLAG -Iemulator/beam -o $DTRACE_2STEP_TEST -s emulator/beam/erlang_dtrace.d conftest.$OBJEXT 2>&AS_MESSAGE_LOG_FD
if test -f $DTRACE_2STEP_TEST; then
- $RM $DTRACE_2STEP_TEST
+ rm -f $DTRACE_2STEP_TEST
DTRACE_ENABLED_2STEP=yes
fi],
[])
- $RM -f foo-dtrace.h
+ rm -f foo-dtrace.h
AS_IF([test "x$DTRACE_ENABLED_2STEP" = "xyes"],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])])
@@ -3190,7 +3185,7 @@ need_java="jinterface ic/java_src"
# Remove all SKIP files from previous runs
for a in $need_java ; do
- $RM -f $ERL_TOP/lib/$a/SKIP
+ rm -f $ERL_TOP/lib/$a/SKIP
done
if test "X$with_javac" = "Xno"; then
@@ -3241,7 +3236,7 @@ dnl this deliberately does not believe that 'gcc' is a C++ compiler
AC_CHECK_TOOLS(CXX, [$CCC c++ g++ CC cxx cc++ cl], false)
# Remove SKIP file from previous run
-$RM -f $ERL_TOP/lib/orber/SKIP
+rm -f $ERL_TOP/lib/orber/SKIP
if test "$CXX" = false; then
echo "No C++ compiler found" > $ERL_TOP/lib/orber/SKIP
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 3473a12526..2e1650516e 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -31,6 +31,46 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 10.2.5</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixes of install/release phase in build system.</p>
+ <list> <item>The source tree was modified when
+ installing/releasing and/or applying a patch.</item>
+ <item>Some files were installed with wrong access
+ rights.</item> <item>If applying a patch (using
+ <c>otp_patch_apply</c>) as another user (except root)
+ than the user that built the source, the documentation
+ was not properly updated.</item> </list>
+ <p>
+ Own Id: OTP-15551</p>
+ </item>
+ <item>
+ <p>
+ Setting the <c>recbuf</c> size of an inet socket the
+ <c>buffer</c> is also automatically increased. Fix a bug
+ where the auto adjustment of inet buffer size would be
+ triggered even if an explicit inet buffer size had
+ already been set.</p>
+ <p>
+ Own Id: OTP-15651 Aux Id: ERIERL-304 </p>
+ </item>
+ <item>
+ <p>
+ Reading from UDP using active <c>true</c> or active
+ <c>N</c> mode has been optimized when more packets than
+ specified by <c>read_packets</c> are available on the
+ socket.</p>
+ <p>
+ Own Id: OTP-15652 Aux Id: ERIERL-304 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 10.2.4</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 3e9cfc990a..21351df656 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -267,7 +267,6 @@ DEXPORT = @DEXPORT@
RANLIB = @RANLIB@
STRIP = strip
PERL = @PERL@
-RM = @RM@
MKDIR = @MKDIR@
USING_MINGW=@MIXED_CYGWIN_MINGW@
@@ -468,13 +467,13 @@ $(ERTS_LIB):
.PHONY: clean
clean:
- $(RM) -f $(GENERATE)
- $(RM) -rf $(TARGET)/*.c $(TARGET)/*.h $(TARGET)/*-GENERATED
- $(RM) -rf $(TARGET)/*/*
- $(RM) -rf obj/$(TARGET)
- $(RM) -rf pcre/obj/$(TARGET) $(PCRE_GENINC)
- $(RM) -rf zlib/obj/$(TARGET)
- $(RM) -rf bin/$(TARGET)
+ $(RM) $(GENERATE)
+ $(RM) -r $(TARGET)/*.c $(TARGET)/*.h $(TARGET)/*-GENERATED
+ $(RM) -r $(TARGET)/*/*
+ $(RM) -r obj/$(TARGET)
+ $(RM) -r pcre/obj/$(TARGET) $(PCRE_GENINC)
+ $(RM) -r zlib/obj/$(TARGET)
+ $(RM) -r bin/$(TARGET)
cd $(ERTS_LIB_DIR) && $(MAKE) clean
.PHONY: docs
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 9d40754d2d..e350a20339 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -776,6 +776,13 @@ erts_send_message(Process* sender,
#endif
erts_queue_proc_message(sender, receiver, *receiver_locks, mp, message);
+
+ if (msize > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
+ Uint reds = msize / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
+ if (reds > CONTEXT_REDS)
+ reds = CONTEXT_REDS;
+ BUMP_REDS(sender, (int) reds);
+ }
}
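
The bump above charges the sender for the words copied into the receiver's heap: for anything larger than one reduction's worth of words, roughly msize / ERTS_MSG_COPY_WORDS_PER_REDUCTION reductions, capped at one full time slice. A hedged sketch of that cost model in Erlang terms (64 is the non-debug value of ERTS_MSG_COPY_WORDS_PER_REDUCTION defined in erl_message.h below; CONTEXT_REDS = 4000 is an assumption, not part of this diff):

    %% Rough cost model only; the real accounting is the C hunk above.
    %% 64 words copied per reduction, capped at one time slice (assumed 4000).
    RedsForCopy = fun(MsgSizeWords) -> min(MsgSizeWords div 64, 4000) end,
    2048 = RedsForCopy(131072),      %% a ~1 MB message on a 64-bit emulator
    4000 = RedsForCopy(16#1000000).  %% very large copies cost at most one slice
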
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 4c2674394e..e5f623a370 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -26,6 +26,12 @@
#include "erl_proc_sig_queue.h"
#undef ERTS_PROC_SIG_QUEUE_TYPE_ONLY
+#ifdef DEBUG
+#define ERTS_MSG_COPY_WORDS_PER_REDUCTION 4
+#else
+#define ERTS_MSG_COPY_WORDS_PER_REDUCTION 64
+#endif
+
struct proc_bin;
struct external_thing_;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 349d9bf13a..5a3cdf980d 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -731,6 +731,23 @@ ErtsMessage* erts_create_message_from_nif_env(ErlNifEnv* msg_env)
return mp;
}
+static ERTS_INLINE ERL_NIF_TERM make_copy(ErlNifEnv* dst_env,
+ ERL_NIF_TERM src_term,
+ Uint *cpy_szp)
+{
+ Uint sz;
+ Eterm* hp;
+ /*
+ * No preserved sharing allowed as long as literals are also preserved.
+ * Process independent environment can not be reached by purge.
+ */
+ sz = size_object(src_term);
+ if (cpy_szp)
+ *cpy_szp += sz;
+ hp = alloc_heap(dst_env, sz);
+ return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
+}
+
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
@@ -743,6 +760,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
Eterm from;
Eterm receiver = to_pid->pid;
int scheduler;
+ Uint copy_sz = 0;
execution_state(env, &c_p, &scheduler);
@@ -806,14 +824,14 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
stoken = NIL;
}
#endif
- token = enif_make_copy(msg_env, stoken);
+ token = make_copy(msg_env, stoken, &copy_sz);
#ifdef USE_VM_PROBES
if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING) {
if (is_immed(DT_UTAG(c_p)))
utag = DT_UTAG(c_p);
else
- utag = enif_make_copy(msg_env, DT_UTAG(c_p));
+ utag = make_copy(msg_env, DT_UTAG(c_p), &copy_sz);
}
if (DTRACE_ENABLED(message_send)) {
if (have_seqtrace(stoken)) {
@@ -835,6 +853,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
Uint sz;
INITIALIZE_LITERAL_PURGE_AREA(litarea);
sz = size_object_litopt(msg, &litarea);
+ copy_sz += sz;
if (c_p && !env->tracee) {
full_flush_env(env);
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
@@ -867,6 +886,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
trace_send(c_p, receiver, msg);
full_cache_env(env);
}
+ if (c_p && scheduler > 0 && copy_sz > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
+ Uint reds = copy_sz / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
+ if (reds > CONTEXT_REDS)
+ reds = CONTEXT_REDS;
+ BUMP_REDS(c_p, (int) reds);
+ }
}
else {
/* This clause is taken when the nif is called in the context
@@ -935,6 +960,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
erts_queue_message(rp, rp_locks, mp, msg, from);
done:
+
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks & ~lc_locks)
@@ -1047,18 +1073,9 @@ int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port)
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
- Uint sz;
- Eterm* hp;
- /*
- * No preserved sharing allowed as long as literals are also preserved.
- * Process independent environment cannot be reached by purge.
- */
- sz = size_object(src_term);
- hp = alloc_heap(dst_env, sz);
- return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
+ return make_copy(dst_env, src_term, NULL);
}
-
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* oh)
{
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 78411f324c..c93966d24f 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -574,7 +574,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
driver_select(port, e, mode | (on?ERL_DRV_USE:0), on)
#define sock_select(d, flags, onoff) do { \
- ASSERT(!(d)->is_ignored); \
+ ASSERT(!INET_IGNORED(d)); \
(d)->event_mask = (onoff) ? \
((d)->event_mask | (flags)) : \
((d)->event_mask & ~(flags)); \
@@ -898,6 +898,15 @@ static size_t my_strnlen(const char *s, size_t maxlen)
#define INET_CMSG_RECVTCLASS (1 << 1) /* am_recvtclass, am_tclass */
#define INET_CMSG_RECVTTL (1 << 2) /* am_recvttl, am_ttl */
+/* Inet flags */
+#define INET_FLG_BUFFER_SET (1 << 0) /* am_buffer has been set by user */
+#define INET_FLG_IS_IGNORED (1 << 1) /* If a fd is ignored by the inet_drv.
+ This flag should be set to true when
+ the fd is used outside of inet_drv. */
+#define INET_FLG_IS_IGNORED_RD (1 << 2)
+#define INET_FLG_IS_IGNORED_WR (1 << 3)
+#define INET_FLG_IS_IGNORED_PASS (1 << 4)
+
/*
** End of interface constants.
**--------------------------------------------------------------------------*/
@@ -943,10 +952,11 @@ static size_t my_strnlen(const char *s, size_t maxlen)
#define INET_IFNAMSIZ 16
/* INET Ignore states */
-#define INET_IGNORE_NONE 0
-#define INET_IGNORE_READ (1 << 0)
-#define INET_IGNORE_WRITE (1 << 1)
-#define INET_IGNORE_PASSIVE (1 << 2)
+#define INET_IGNORE_CLEAR(desc) ((desc)->flags & ~(INET_IGNORE_READ|INET_IGNORE_WRITE|INET_IGNORE_PASSIVE))
+#define INET_IGNORED(desc) ((desc)->flags & INET_FLG_IS_IGNORED)
+#define INET_IGNORE_READ (INET_FLG_IS_IGNORED|INET_FLG_IS_IGNORED_RD)
+#define INET_IGNORE_WRITE (INET_FLG_IS_IGNORED|INET_FLG_IS_IGNORED_WR)
+#define INET_IGNORE_PASSIVE (INET_FLG_IS_IGNORED|INET_FLG_IS_IGNORED_PASS)
/* Max length of Erlang Term Buffer (for outputting structured terms): */
#ifdef HAVE_SCTP
@@ -1128,9 +1138,7 @@ typedef struct {
double send_avg; /* average packet size sent */
subs_list empty_out_q_subs; /* Empty out queue subscribers */
- int is_ignored; /* if a fd is ignored by the inet_drv.
- This flag should be set to true when
- the fd is used outside of inet_drv. */
+ int flags;
#ifdef HAVE_SETNS
char *netns; /* Socket network namespace name
as full file path */
@@ -4551,7 +4559,7 @@ static void desc_close(inet_descriptor* desc)
* We should close the fd here, but the other driver might still
* be selecting on it.
*/
- if (!desc->is_ignored)
+ if (!INET_IGNORED(desc))
driver_select(desc->port,(ErlDrvEvent)(long)desc->event,
ERL_DRV_USE, 0);
else
@@ -6312,6 +6320,7 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
(long)desc->port, desc->s, ival));
if (ival < INET_MIN_BUFFER) ival = INET_MIN_BUFFER;
desc->bufsz = ival;
+ desc->flags |= INET_FLG_BUFFER_SET;
continue;
case INET_LOPT_ACTIVE:
@@ -6507,6 +6516,16 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
case INET_OPT_RCVBUF: type = SO_RCVBUF;
DEBUGF(("inet_set_opts(%ld): s=%d, SO_RCVBUF=%d\r\n",
(long)desc->port, desc->s, ival));
+ if (!(desc->flags & INET_FLG_BUFFER_SET)) {
+ /* make sure we have desc->bufsz >= SO_RCVBUF */
+ if (ival > (1 << 16) && desc->stype == SOCK_DGRAM && !IS_SCTP(desc))
+ /* For UDP we don't want to automatically
+ set the buffer size to be larger than
+ the theoretical max MTU */
+ desc->bufsz = 1 << 16;
+ else if (ival > desc->bufsz)
+ desc->bufsz = ival;
+ }
break;
case INET_OPT_LINGER: type = SO_LINGER;
if (len < 4)
@@ -6752,23 +6771,17 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
}
DEBUGF(("inet_set_opts(%ld): s=%d returned %d\r\n",
(long)desc->port, desc->s, res));
- if (type == SO_RCVBUF) {
- /* make sure we have desc->bufsz >= SO_RCVBUF */
- if (ival > (1 << 16) && desc->stype == SOCK_DGRAM && !IS_SCTP(desc))
- /* For UDP we don't want to automatically
- set the buffer size to be larger than
- the theoretical max MTU */
- desc->bufsz = 1 << 16;
- else if (ival > desc->bufsz)
- desc->bufsz = ival;
- }
}
if ( ((desc->stype == SOCK_STREAM) && IS_CONNECTED(desc)) ||
((desc->stype == SOCK_DGRAM) && IS_OPEN(desc))) {
- if (desc->active != old_active)
+ if (desc->active != old_active) {
+ /* Need to cancel the read_packet timer if we go from active to passive. */
+ if (desc->active == INET_PASSIVE && desc->stype == SOCK_DGRAM)
+ driver_cancel_timer(desc->port);
sock_select(desc, (FD_READ|FD_CLOSE), (desc->active>0));
+ }
/* XXX: UDP sockets could also trigger immediate read here NIY */
if ((desc->stype==SOCK_STREAM) && desc->active) {
@@ -6910,6 +6923,7 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
if (desc->bufsz < INET_MIN_BUFFER)
desc->bufsz = INET_MIN_BUFFER;
+ desc->flags |= INET_FLG_BUFFER_SET;
res = 0; /* This does not affect the kernel buffer size */
continue;
@@ -7031,6 +7045,7 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
smaller than the kernel one: */
if (desc->bufsz <= arg.ival)
desc->bufsz = arg.ival;
+ desc->flags |= INET_FLG_BUFFER_SET;
break;
}
case INET_OPT_SNDBUF:
@@ -7041,10 +7056,6 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
arg_ptr = (char*) (&arg.ival);
arg_sz = sizeof ( arg.ival);
- /* Adjust the size of the user-level recv buffer, so it's not
- smaller than the kernel one: */
- if (desc->bufsz <= arg.ival)
- desc->bufsz = arg.ival;
break;
}
case INET_OPT_REUSEADDR:
@@ -9101,7 +9112,7 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
sys_memzero((char *)&desc->remote,sizeof(desc->remote));
- desc->is_ignored = 0;
+ desc->flags = 0;
#ifdef HAVE_SETNS
desc->netns = NULL;
@@ -9503,19 +9514,19 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf,
if (desc->stype != SOCK_STREAM)
return ctl_error(EINVAL, rbuf, rsize);
- if (*buf == 1 && !desc->is_ignored) {
+ if (*buf == 1 && !INET_IGNORED(desc)) {
sock_select(desc, (FD_READ|FD_WRITE|FD_CLOSE|ERL_DRV_USE_NO_CALLBACK), 0);
if (desc->active)
- desc->is_ignored = INET_IGNORE_READ;
+ desc->flags |= INET_IGNORE_READ;
else
- desc->is_ignored = INET_IGNORE_PASSIVE;
- } else if (*buf == 0 && desc->is_ignored) {
+ desc->flags |= INET_IGNORE_PASSIVE;
+ } else if (*buf == 0 && INET_IGNORED(desc)) {
int flags = FD_CLOSE;
- if (desc->is_ignored & INET_IGNORE_READ)
+ if (desc->flags & INET_IGNORE_READ)
flags |= FD_READ;
- if (desc->is_ignored & INET_IGNORE_WRITE)
+ if (desc->flags & INET_IGNORE_WRITE)
flags |= FD_WRITE;
- desc->is_ignored = INET_IGNORE_NONE;
+ desc->flags = INET_IGNORE_CLEAR(desc);
if (flags != FD_CLOSE)
sock_select(desc, flags, 1);
} else
@@ -10231,17 +10242,17 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
if (enq_async(INETP(desc), tbuf, TCP_REQ_RECV) < 0)
return ctl_error(EALREADY, rbuf, rsize);
- if (INETP(desc)->is_ignored || tcp_recv(desc, n) == 0) {
+ if (INET_IGNORED(INETP(desc)) || tcp_recv(desc, n) == 0) {
if (timeout == 0)
async_error_am(INETP(desc), am_timeout);
else {
if (timeout != INET_INFINITY)
add_multi_timer(desc, INETP(desc)->port, 0,
timeout, &tcp_inet_recv_timeout);
- if (!INETP(desc)->is_ignored)
+ if (!INET_IGNORED(INETP(desc)))
sock_select(INETP(desc),(FD_READ|FD_CLOSE),1);
else
- INETP(desc)->is_ignored |= INET_IGNORE_READ;
+ INETP(desc)->flags |= INET_IGNORE_READ;
}
}
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
@@ -11108,7 +11119,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
#ifdef DEBUG
long port = (long) desc->inet.port; /* Used after driver_exit() */
#endif
- ASSERT(!INETP(desc)->is_ignored);
+ ASSERT(!INET_IGNORED(INETP(desc)));
DEBUGF(("tcp_inet_input(%ld) {s=%d\r\n", port, desc->inet.s));
/* XXX fprintf(stderr,"tcp_inet_input(%ld) {s=%d}\r\n",(long) desc->inet.port, desc->inet.s); */
if (desc->inet.state == INET_STATE_ACCEPTING) {
@@ -11443,8 +11454,8 @@ static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev)
DEBUGF(("tcp_sendv(%ld): s=%d, about to send "LLU","LLU" bytes\r\n",
(long)desc->inet.port, desc->inet.s, (llu_t)h_len, (llu_t)len));
- if (INETP(desc)->is_ignored) {
- INETP(desc)->is_ignored |= INET_IGNORE_WRITE;
+ if (INET_IGNORED(INETP(desc))) {
+ INETP(desc)->flags |= INET_IGNORE_WRITE;
n = 0;
} else if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
driver_enqv(ix, ev, 0);
@@ -11479,7 +11490,7 @@ static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev)
DEBUGF(("tcp_sendv(%ld): s=%d, Send failed, queuing\r\n",
(long)desc->inet.port, desc->inet.s));
driver_enqv(ix, ev, n);
- if (!INETP(desc)->is_ignored)
+ if (!INET_IGNORED(INETP(desc)))
sock_select(INETP(desc),(FD_WRITE|FD_CLOSE), 1);
}
return 0;
@@ -11548,8 +11559,8 @@ static int tcp_send(tcp_descriptor* desc, char* ptr, ErlDrvSizeT len)
DEBUGF(("tcp_send(%ld): s=%d, about to send "LLU","LLU" bytes\r\n",
(long)desc->inet.port, desc->inet.s, (llu_t)h_len, (llu_t)len));
- if (INETP(desc)->is_ignored) {
- INETP(desc)->is_ignored |= INET_IGNORE_WRITE;
+ if (INET_IGNORED(INETP(desc))) {
+ INETP(desc)->flags |= INET_IGNORE_WRITE;
n = 0;
} else if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
sock_send(desc->inet.s, buf, 0, 0);
@@ -11582,7 +11593,7 @@ static int tcp_send(tcp_descriptor* desc, char* ptr, ErlDrvSizeT len)
n -= h_len;
driver_enq(ix, ptr+n, len-n);
}
- if (!INETP(desc)->is_ignored)
+ if (!INET_IGNORED(INETP(desc)))
sock_select(INETP(desc),(FD_WRITE|FD_CLOSE), 1);
}
return 0;
@@ -11863,7 +11874,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
int ret = 0;
ErlDrvPort ix = desc->inet.port;
- ASSERT(!INETP(desc)->is_ignored);
+ ASSERT(!INET_IGNORED(INETP(desc)));
DEBUGF(("tcp_inet_output(%ld) {s=%d\r\n",
(long)desc->inet.port, desc->inet.s));
if (desc->inet.state == INET_STATE_CONNECTING) {
@@ -12538,9 +12549,12 @@ static void packet_inet_timeout(ErlDrvData e)
{
udp_descriptor * udesc = (udp_descriptor*) e;
inet_descriptor * desc = INETP(udesc);
- if (!(desc->active))
+ if (!(desc->active)) {
sock_select(desc, FD_READ, 0);
- async_error_am (desc, am_timeout);
+ async_error_am (desc, am_timeout);
+ } else {
+ (void)packet_inet_input(udesc, desc->s);
+ }
}
@@ -12896,6 +12910,15 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
sock_select(desc, FD_READ, 1);
}
#endif
+
+ /* We set a timer on the port to trigger now.
+ This emulates a "yield" operation as that is
+ what we want to do here. We do *NOT* do a deselect
+ as that is expensive, instead we check if the
+ socket it still active when the timeout triggers
+ and if it is not, then we just ignore the timeout */
+ driver_set_timer(desc->port, 0);
+
return count;
}
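
With the INET_FLG_BUFFER_SET changes above, setting recbuf still grows the user-level buffer so it can hold what the kernel delivers, but only as long as buffer has never been set explicitly, and for datagram sockets the automatic value is capped at 64 kB. A hedged Erlang-level sketch of that behaviour (values are illustrative; what the OS actually grants for SO_RCVBUF does not affect the driver-level buffer here):

    %% No explicit 'buffer': raising 'recbuf' auto-adjusts it, capped at
    %% 65536 for UDP since a single datagram cannot be larger anyway.
    {ok, U} = gen_udp:open(0, [binary]),
    ok = inet:setopts(U, [{recbuf, 1 bsl 20}]),
    {ok, [{buffer, 65536}]} = inet:getopts(U, [buffer]),

    %% Explicit 'buffer': the OTP-15651 fix makes a later 'recbuf' change
    %% leave it alone instead of silently growing it.
    {ok, T} = gen_udp:open(0, [binary, {buffer, 8192}]),
    ok = inet:setopts(T, [{recbuf, 1 bsl 20}]),
    {ok, [{buffer, 8192}]} = inet:getopts(T, [buffer]).
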
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index 8e1f5b58c4..1da11c2d0a 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -29,7 +29,6 @@ CC=@CC@
LD=@LD@
AR=@AR@
RANLIB=@RANLIB@
-RM=@RM@
MKDIR=@MKDIR@
INSTALL=@INSTALL@
INSTALL_DIR=@INSTALL_DIR@
@@ -536,9 +535,9 @@ release_docs_spec:
#
.PHONY: clean
clean:
- $(RM) -rf ../lib/internal/$(TARGET)/*
- $(RM) -rf ../lib/$(TARGET)/*
- $(RM) -rf obj/$(TARGET)/*
+ $(RM) -r ../lib/internal/$(TARGET)/*
+ $(RM) -r ../lib/$(TARGET)/*
+ $(RM) -r obj/$(TARGET)/*
#
# Make dependencies
diff --git a/erts/vsn.mk b/erts/vsn.mk
index e4bdb1a8eb..bab5c805eb 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
#
-VSN = 10.2.4
+VSN = 10.2.5
# Port number 4365 in 4.2
# Port number 4366 in 4.3
diff --git a/lib/Makefile b/lib/Makefile
index 6605c6145c..ea2827bef0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -58,10 +58,10 @@ else
SUB_DIRECTORIES = hipe parsetools asn1/src
else
ifdef TERTIARY_BOOTSTRAP
- SUB_DIRECTORIES = snmp sasl jinterface syntax_tools wx
+ SUB_DIRECTORIES = snmp sasl erl_interface jinterface syntax_tools wx
else
ifdef DOC_BOOTSTRAP
- SUB_DIRECTORIES = xmerl edoc erl_docgen
+ SUB_DIRECTORIES = xmerl edoc erl_docgen public_key
else # Not bootstrap build
SUB_DIRECTORIES = $(ERTS_APPLICATIONS) \
$(ERLANG_APPLICATIONS) \
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index 296c095be2..ab8caa1a0d 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -579,7 +579,11 @@ valfun_1({get_tuple_element,Src,N,Dst}, Vst) ->
Type = get_element_type(Index, Src, Vst),
extract_term(Type, {bif,element}, [Index, Src], Dst, Vst);
valfun_1({jump,{f,Lbl}}, Vst) ->
- kill_state(branch_state(Lbl, Vst));
+ branch(Lbl, Vst,
+ fun(SuccVst) ->
+ %% The next instruction is never executed.
+ kill_state(SuccVst)
+ end);
valfun_1(I, Vst) ->
valfun_2(I, Vst).
@@ -589,14 +593,17 @@ init_try_catch_branch(Tag, Dst, Fail, Vst0) ->
St = St0#st{ct=[[Fail]|Fails]},
Vst = Vst0#vst{current=St},
- complex_test(Fail,
- fun(CatchVst) ->
- #vst{current=#st{ys=Ys}} = CatchVst,
- maps:fold(fun init_catch_handler_1/3, CatchVst, Ys)
- end,
- fun(SuccVst) ->
- SuccVst
- end, Vst).
+ branch(Fail, Vst,
+ fun(CatchVst) ->
+ #vst{current=#st{ys=Ys}} = CatchVst,
+ maps:fold(fun init_catch_handler_1/3, CatchVst, Ys)
+ end,
+ fun(SuccVst) ->
+ %% All potentially-throwing instructions after this
+ %% one will implicitly branch to the fail label;
+ %% see valfun_2/2
+ SuccVst
+ end).
%% Set the initial state at the try/catch label. Assume that Y registers
%% contain terms or try/catch tags.
@@ -607,21 +614,27 @@ init_catch_handler_1(Reg, uninitialized, Vst) ->
init_catch_handler_1(_, _, Vst) ->
Vst.
-%% Update branched state if necessary and try next set of instructions.
-valfun_2(I, #vst{current=#st{ct=[]}}=Vst) ->
- valfun_3(I, Vst);
valfun_2(I, #vst{current=#st{ct=[[Fail]|_]}}=Vst) when is_integer(Fail) ->
- %% Update branched state.
+ %% We have an active try/catch tag and we can jump there from this
+ %% instruction, so we need to update the branched state of the try/catch
+ %% handler.
valfun_3(I, branch_state(Fail, Vst));
+valfun_2(I, #vst{current=#st{ct=[]}}=Vst) ->
+ valfun_3(I, Vst);
valfun_2(_, _) ->
error(ambiguous_catch_try_state).
%% Handle the remaining floating point instructions here.
%% Floating point.
-valfun_3({fconv,Src,{fr,_}=Dst}, Vst0) ->
- assert_term(Src, Vst0),
- Vst = update_type(fun meet/2, number, Src, Vst0),
- set_freg(Dst, Vst);
+valfun_3({fconv,Src,{fr,_}=Dst}, Vst) ->
+ assert_term(Src, Vst),
+
+ %% An exception is raised on error, hence branching to 0.
+ branch(0, Vst,
+ fun(SuccVst0) ->
+ SuccVst = update_type(fun meet/2, number, Src, SuccVst0),
+ set_freg(Dst, SuccVst)
+ end);
valfun_3({bif,fadd,_,[_,_]=Ss,Dst}, Vst) ->
float_op(Ss, Dst, Vst);
valfun_3({bif,fdiv,_,[_,_]=Ss,Dst}, Vst) ->
@@ -682,67 +695,87 @@ valfun_4({call_ext_last,_,_,_}, #vst{current=#st{numy=NumY}}) ->
valfun_4({make_fun2,_,_,_,Live}, Vst) ->
call(make_fun, Live, Vst);
%% Other BIFs
-valfun_4({bif,element,{f,Fail},[Pos,Tuple],Dst}, Vst0) ->
- PosType = get_term_type(Pos, Vst0),
- ElementType = get_element_type(PosType, Tuple, Vst0),
- InferredType = {tuple,[get_tuple_size(PosType)],#{}},
- Vst1 = branch_state(Fail, Vst0),
- Vst2 = update_type(fun meet/2, InferredType, Tuple, Vst1),
- Vst = update_type(fun meet/2, {integer,[]}, Pos, Vst2),
- extract_term(ElementType, {bif,element}, [Pos,Tuple], Dst, Vst);
+valfun_4({bif,element,{f,Fail},[Pos,Src],Dst}, Vst) ->
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ PosType = get_term_type(Pos, SuccVst0),
+ TupleType = {tuple,[get_tuple_size(PosType)],#{}},
+
+ SuccVst1 = update_type(fun meet/2, TupleType,
+ Src, SuccVst0),
+ SuccVst = update_type(fun meet/2, {integer,[]},
+ Pos, SuccVst1),
+
+ ElementType = get_element_type(PosType, Src, SuccVst),
+ extract_term(ElementType, {bif,element}, [Pos,Src],
+ Dst, SuccVst)
+ end);
valfun_4({bif,raise,{f,0},Src,_Dst}, Vst) ->
validate_src(Src, Vst),
kill_state(Vst);
valfun_4(raw_raise=I, Vst) ->
call(I, 3, Vst);
-valfun_4({bif,Op,{f,Fail},[Cons]=Ss,Dst}, Vst0)
- when Op =:= hd; Op =:= tl ->
- validate_src(Ss, Vst0),
- Vst = type_test(Fail, cons, Cons, Vst0),
- Type = bif_return_type(Op, Ss, Vst),
- extract_term(Type, {bif,Op}, Ss, Dst, Vst);
-valfun_4({bif,Op,{f,Fail},Ss,Dst}, Vst0) ->
- validate_src(Ss, Vst0),
- Vst1 = branch_state(Fail, Vst0),
-
- %% Infer argument types. Note that we can't type_test in the general case
- %% as the BIF could fail for reasons other than bad argument types.
- ArgTypes = bif_arg_types(Op, Ss),
- Vst = foldl(fun({Arg, T}, Vsti) ->
- update_type(fun meet/2, T, Arg, Vsti)
- end, Vst1, zip(Ss, ArgTypes)),
-
- Type = bif_return_type(Op, Ss, Vst),
- extract_term(Type, {bif,Op}, Ss, Dst, Vst);
+valfun_4({bif,Op,{f,Fail},[Src]=Ss,Dst}, Vst) when Op =:= hd; Op =:= tl ->
+ assert_term(Src, Vst),
+ branch(Fail, Vst,
+ fun(FailVst) ->
+ update_type(fun subtract/2, cons, Src, FailVst)
+ end,
+ fun(SuccVst0) ->
+ SuccVst = update_type(fun meet/2, cons, Src, SuccVst0),
+ extract_term(term, {bif,Op}, Ss, Dst, SuccVst)
+ end);
+valfun_4({bif,Op,{f,Fail},Ss,Dst}, Vst) ->
+ validate_src(Ss, Vst),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ %% Infer argument types. Note that we can't subtract
+ %% types as the BIF could fail for reasons other than
+ %% bad argument types.
+ ArgTypes = bif_arg_types(Op, Ss),
+ SuccVst = foldl(fun({Arg, T}, V) ->
+ update_type(fun meet/2, T, Arg, V)
+ end, SuccVst0, zip(Ss, ArgTypes)),
+ Type = bif_return_type(Op, Ss, SuccVst),
+ extract_term(Type, {bif,Op}, Ss, Dst, SuccVst)
+ end);
valfun_4({gc_bif,Op,{f,Fail},Live,Ss,Dst}, #vst{current=St0}=Vst0) ->
validate_src(Ss, Vst0),
verify_live(Live, Vst0),
verify_y_init(Vst0),
+
+ %% Heap allocations and X registers are killed regardless of whether we
+ %% fail or not, as we may fail after GC.
St = kill_heap_allocation(St0),
- Vst1 = Vst0#vst{current=St},
- Vst2 = branch_state(Fail, Vst1),
+ Vst = prune_x_regs(Live, Vst0#vst{current=St}),
+
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ ArgTypes = bif_arg_types(Op, Ss),
+ SuccVst = foldl(fun({Arg, T}, V) ->
+ update_type(fun meet/2, T, Arg, V)
+ end, SuccVst0, zip(Ss, ArgTypes)),
- ArgTypes = bif_arg_types(Op, Ss),
- Vst3 = foldl(fun({Arg, T}, Vsti) ->
- update_type(fun meet/2, T, Arg, Vsti)
- end, Vst2, zip(Ss, ArgTypes)),
+ Type = bif_return_type(Op, Ss, SuccVst),
- Type = bif_return_type(Op, Ss, Vst3),
- Vst = prune_x_regs(Live, Vst3),
- extract_term(Type, {gc_bif,Op}, Ss, Dst, Vst, Vst0);
+ %% We're passing Vst0 as the original because the
+ %% registers were pruned before the branch.
+ extract_term(Type, {gc_bif,Op}, Ss, Dst, SuccVst, Vst0)
+ end);
valfun_4(return, #vst{current=#st{numy=none}}=Vst) ->
assert_durable_term({x,0}, Vst),
kill_state(Vst);
valfun_4(return, #vst{current=#st{numy=NumY}}) ->
error({stack_frame,NumY});
-valfun_4({loop_rec,{f,Fail},Dst}, Vst0) ->
- %% This term may not be part of the root set until
- %% remove_message/0 is executed. If control transfers
- %% to the loop_rec_end/1 instruction, no part of
- %% this term must be stored in a Y register.
- Vst1 = branch_state(Fail, Vst0),
- {Ref, Vst} = new_value(term, loop_rec, [], Vst1),
- mark_fragile(Dst, set_reg_vref(Ref, Dst, Vst));
+valfun_4({loop_rec,{f,Fail},Dst}, Vst) ->
+ %% This term may not be part of the root set until remove_message/0 is
+ %% executed. If control transfers to the loop_rec_end/1 instruction, no
+ %% part of this term must be stored in a Y register.
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ {Ref, SuccVst} = new_value(term, loop_rec, [], SuccVst0),
+ mark_fragile(Dst, set_reg_vref(Ref, Dst, SuccVst))
+ end);
valfun_4({wait,_}, Vst) ->
verify_y_init(Vst),
kill_state(Vst);
@@ -769,14 +802,14 @@ valfun_4({set_tuple_element,Src,Tuple,N}, Vst) ->
Es = set_element_type({integer,I}, get_term_type(Src, Vst), Es0),
override_type({tuple, Sz, Es}, Tuple, Vst);
%% Match instructions.
-valfun_4({select_val,Src,{f,Fail},{list,Choices}}, Vst0) ->
- assert_term(Src, Vst0),
+valfun_4({select_val,Src,{f,Fail},{list,Choices}}, Vst) ->
+ assert_term(Src, Vst),
assert_choices(Choices),
- select_val_branches(Fail, Src, Choices, Vst0);
+ validate_select_val(Fail, Choices, Src, Vst);
valfun_4({select_tuple_arity,Tuple,{f,Fail},{list,Choices}}, Vst) ->
assert_type(tuple, Tuple, Vst),
assert_arities(Choices),
- select_arity_branches(Fail, Choices, Tuple, Vst);
+ validate_select_tuple_arity(Fail, Choices, Tuple, Vst);
%% New bit syntax matching instructions.
valfun_4({test,bs_start_match3,{f,Fail},Live,[Src],Dst}, Vst) ->
@@ -785,17 +818,17 @@ valfun_4({test,bs_start_match2,{f,Fail},Live,[Src,Slots],Dst}, Vst) ->
validate_bs_start_match(Fail, Live, bsm_match_state(Slots), Src, Dst, Vst);
valfun_4({test,bs_match_string,{f,Fail},[Ctx,_,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_skip_bits2,{f,Fail},[Ctx,Src,_,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_test_tail2,{f,Fail},[Ctx,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_test_unit,{f,Fail},[Ctx,_]}, Vst) ->
bsm_validate_context(Ctx, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst, fun(V) -> V end);
valfun_4({test,bs_skip_utf8,{f,Fail},[Ctx,Live,_]}, Vst) ->
validate_bs_skip_utf(Fail, Ctx, Live, Vst);
valfun_4({test,bs_skip_utf16,{f,Fail},[Ctx,Live,_]}, Vst) ->
@@ -830,6 +863,10 @@ valfun_4({bs_set_position, Ctx, Pos}, Vst) ->
Vst;
%% Other test instructions.
+valfun_4({test,has_map_fields,{f,Lbl},Src,{list,List}}, Vst) ->
+ assert_type(map, Src, Vst),
+ assert_unique_map_keys(List),
+ branch(Lbl, Vst, fun(V) -> V end);
valfun_4({test,is_atom,{f,Lbl},[Src]}, Vst) ->
type_test(Lbl, {atom,[]}, Src, Vst);
valfun_4({test,is_binary,{f,Lbl},[Src]}, Vst) ->
@@ -850,70 +887,68 @@ valfun_4({test,is_number,{f,Lbl},[Src]}, Vst) ->
type_test(Lbl, number, Src, Vst);
valfun_4({test,is_list,{f,Lbl},[Src]}, Vst) ->
type_test(Lbl, list, Src, Vst);
+valfun_4({test,is_map,{f,Lbl},[Src]}, Vst) ->
+ type_test(Lbl, map, Src, Vst);
valfun_4({test,is_nil,{f,Lbl},[Src]}, Vst) ->
%% is_nil is an exact check against the 'nil' value, and should not be
%% treated as a simple type test.
assert_term(Src, Vst),
- complex_test(Lbl,
- fun(FailVst) ->
- update_ne_types(Src, nil, FailVst)
- end,
- fun(SuccVst) ->
- update_eq_types(Src, nil, SuccVst)
- end, Vst);
-valfun_4({test,is_map,{f,Lbl},[Src]}, Vst) ->
- case Src of
- {Tag,_} when Tag =:= x; Tag =:= y ->
- type_test(Lbl, map, Src, Vst);
- {literal,Map} when is_map(Map) ->
- Vst;
- _ ->
- assert_term(Src, Vst),
- kill_state(Vst)
- end;
-valfun_4({test,test_arity,{f,Lbl},[Tuple,Sz]}, Vst0) when is_integer(Sz) ->
- assert_type(tuple, Tuple, Vst0),
- Vst = branch_state(Lbl, Vst0),
- update_type(fun meet/2, {tuple,Sz,#{}}, Tuple, Vst);
-valfun_4({test,is_tagged_tuple,{f,Lbl},[Src,Sz,Atom]}, Vst0) ->
- assert_term(Src, Vst0),
- Vst = branch_state(Lbl, Vst0),
- update_type(fun meet/2, {tuple,Sz,#{ {integer,1} => Atom }}, Src, Vst);
-valfun_4({test,has_map_fields,{f,Lbl},Src,{list,List}}, Vst) ->
- assert_type(map, Src, Vst),
- assert_unique_map_keys(List),
- branch_state(Lbl, Vst);
+ branch(Lbl, Vst,
+ fun(FailVst) ->
+ update_ne_types(Src, nil, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_eq_types(Src, nil, SuccVst)
+ end);
+valfun_4({test,test_arity,{f,Lbl},[Tuple,Sz]}, Vst) when is_integer(Sz) ->
+ assert_type(tuple, Tuple, Vst),
+ Type = {tuple, Sz, #{}},
+ type_test(Lbl, Type, Tuple, Vst);
+valfun_4({test,is_tagged_tuple,{f,Lbl},[Src,Sz,Atom]}, Vst) ->
+ assert_term(Src, Vst),
+ Type = {tuple, Sz, #{ {integer,1} => Atom }},
+ type_test(Lbl, Type, Src, Vst);
valfun_4({test,is_eq_exact,{f,Lbl},[Src,Val]=Ss}, Vst) ->
validate_src(Ss, Vst),
- complex_test(Lbl,
- fun(FailVst) ->
- update_ne_types(Src, Val, FailVst)
- end,
- fun(SuccVst) ->
- update_eq_types(Src, Val, SuccVst)
- end, Vst);
+ branch(Lbl, Vst,
+ fun(FailVst) ->
+ update_ne_types(Src, Val, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_eq_types(Src, Val, SuccVst)
+ end);
valfun_4({test,is_ne_exact,{f,Lbl},[Src,Val]=Ss}, Vst) ->
validate_src(Ss, Vst),
- complex_test(Lbl,
- fun(FailVst) ->
- update_eq_types(Src, Val, FailVst)
- end,
- fun(SuccVst) ->
- update_ne_types(Src, Val, SuccVst)
- end, Vst);
+ branch(Lbl, Vst,
+ fun(FailVst) ->
+ update_eq_types(Src, Val, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_ne_types(Src, Val, SuccVst)
+ end);
valfun_4({test,_Op,{f,Lbl},Src}, Vst) ->
+ %% is_pid, is_reference, et cetera.
validate_src(Src, Vst),
- branch_state(Lbl, Vst);
+ branch(Lbl, Vst, fun(V) -> V end);
valfun_4({bs_add,{f,Fail},[A,B,_],Dst}, Vst) ->
assert_term(A, Vst),
assert_term(B, Vst),
- create_term({integer,[]}, bs_add, [A, B], Dst, branch_state(Fail, Vst));
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term({integer,[]}, bs_add, [A, B], Dst, SuccVst)
+ end);
valfun_4({bs_utf8_size,{f,Fail},A,Dst}, Vst) ->
assert_term(A, Vst),
- create_term({integer,[]}, bs_utf8_size, [A], Dst, branch_state(Fail, Vst));
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term({integer,[]}, bs_utf8_size, [A], Dst, SuccVst)
+ end);
valfun_4({bs_utf16_size,{f,Fail},A,Dst}, Vst) ->
assert_term(A, Vst),
- create_term({integer,[]}, bs_utf16_size, [A], Dst, branch_state(Fail, Vst));
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term({integer,[]}, bs_utf16_size, [A], Dst, SuccVst)
+ end);
valfun_4({bs_init2,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
verify_live(Live, Vst0),
verify_y_init(Vst0),
@@ -923,10 +958,12 @@ valfun_4({bs_init2,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
true ->
assert_term(Sz, Vst0)
end,
- Vst1 = heap_alloc(Heap, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- create_term(binary, bs_init2, [], Dst, Vst, Vst0);
+ Vst = heap_alloc(Heap, Vst0),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ create_term(binary, bs_init2, [], Dst, SuccVst, SuccVst0)
+ end);
valfun_4({bs_init_bits,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
verify_live(Live, Vst0),
verify_y_init(Vst0),
@@ -936,47 +973,71 @@ valfun_4({bs_init_bits,{f,Fail},Sz,Heap,Live,_,Dst}, Vst0) ->
true ->
assert_term(Sz, Vst0)
end,
- Vst1 = heap_alloc(Heap, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- create_term(binary, bs_init_bits, [], Dst, Vst);
+ Vst = heap_alloc(Heap, Vst0),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ create_term(binary, bs_init_bits, [], Dst, SuccVst)
+ end);
valfun_4({bs_append,{f,Fail},Bits,Heap,Live,_Unit,Bin,_Flags,Dst}, Vst0) ->
verify_live(Live, Vst0),
verify_y_init(Vst0),
assert_term(Bits, Vst0),
assert_term(Bin, Vst0),
- Vst1 = heap_alloc(Heap, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- create_term(binary, bs_append, [Bin], Dst, Vst, Vst0);
-valfun_4({bs_private_append,{f,Fail},Bits,_Unit,Bin,_Flags,Dst}, Vst0) ->
- assert_term(Bits, Vst0),
- assert_term(Bin, Vst0),
- Vst = branch_state(Fail, Vst0),
- create_term(binary, bs_private_append, [Bin], Dst, Vst);
+ Vst = heap_alloc(Heap, Vst0),
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ create_term(binary, bs_append, [Bin], Dst, SuccVst, SuccVst0)
+ end);
+valfun_4({bs_private_append,{f,Fail},Bits,_Unit,Bin,_Flags,Dst}, Vst) ->
+ assert_term(Bits, Vst),
+ assert_term(Bin, Vst),
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ create_term(binary, bs_private_append, [Bin], Dst, SuccVst)
+ end);
valfun_4({bs_put_string,Sz,_}, Vst) when is_integer(Sz) ->
Vst;
valfun_4({bs_put_binary,{f,Fail},Sz,_,_,Src}, Vst) ->
assert_term(Sz, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, binary, Src, SuccVst)
+ end);
valfun_4({bs_put_float,{f,Fail},Sz,_,_,Src}, Vst) ->
assert_term(Sz, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {float,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_integer,{f,Fail},Sz,_,_,Src}, Vst) ->
assert_term(Sz, Vst),
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_utf8,{f,Fail},_,Src}, Vst) ->
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_utf16,{f,Fail},_,Src}, Vst) ->
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
valfun_4({bs_put_utf32,{f,Fail},_,Src}, Vst) ->
assert_term(Src, Vst),
- branch_state(Fail, Vst);
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ update_type(fun meet/2, {integer,[]}, Src, SuccVst)
+ end);
%% Map instructions.
valfun_4({put_map_assoc=Op,{f,Fail},Src,Dst,Live,{list,List}}, Vst) ->
verify_put_map(Op, Fail, Src, Dst, Live, List, Vst);
@@ -991,15 +1052,15 @@ verify_get_map(Fail, Src, List, Vst0) ->
assert_not_literal(Src), %OTP 22.
assert_type(map, Src, Vst0),
- complex_test(Fail,
- fun(FailVst) ->
- clobber_map_vals(List, Src, FailVst)
- end,
- fun(SuccVst) ->
- Keys = extract_map_keys(List),
- assert_unique_map_keys(Keys),
- extract_map_vals(List, Src, SuccVst, SuccVst)
- end, Vst0).
+ branch(Fail, Vst0,
+ fun(FailVst) ->
+ clobber_map_vals(List, Src, FailVst)
+ end,
+ fun(SuccVst) ->
+ Keys = extract_map_keys(List),
+ assert_unique_map_keys(Keys),
+ extract_map_vals(List, Src, SuccVst, SuccVst)
+ end).
%% get_map_elements may leave its destinations in an inconsistent state when
%% the fail label is taken. Consider the following:
@@ -1033,13 +1094,16 @@ verify_put_map(Op, Fail, Src, Dst, Live, List, Vst0) ->
assert_type(map, Src, Vst0),
verify_live(Live, Vst0),
verify_y_init(Vst0),
- [assert_term(Term, Vst0) || Term <- List],
- Vst1 = heap_alloc(0, Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = prune_x_regs(Live, Vst2),
- Keys = extract_map_keys(List),
- assert_unique_map_keys(Keys),
- create_term(map, Op, [Src], Dst, Vst, Vst0).
+ _ = [assert_term(Term, Vst0) || Term <- List],
+ Vst = heap_alloc(0, Vst0),
+
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ Keys = extract_map_keys(List),
+ assert_unique_map_keys(Keys),
+ create_term(map, Op, [Src], Dst, SuccVst, SuccVst0)
+ end).
%%
%% Common code for validating bs_start_match* instructions.
@@ -1052,40 +1116,60 @@ validate_bs_start_match(Fail, Live, Type, Src, Dst, Vst) ->
%% #ms{} can represent either a match context or a term, so we have to mark
%% the source as a term if it fails with a match context as an input. This
%% hack is only needed until we get proper union types.
- complex_test(Fail,
- fun(FailVst) ->
- case get_raw_type(Src, FailVst) of
- #ms{} -> override_type(term, Src, FailVst);
- _ -> FailVst
- end
- end,
- fun(SuccVst0) ->
- SuccVst1 = update_type(fun meet/2, binary, Src, SuccVst0),
- SuccVst = prune_x_regs(Live, SuccVst1),
- extract_term(Type, bs_start_match, [Src], Dst,
- SuccVst, SuccVst0)
- end, Vst).
+ branch(Fail, Vst,
+ fun(FailVst) ->
+ case get_movable_term_type(Src, FailVst) of
+ #ms{} -> override_type(term, Src, FailVst);
+ _ -> FailVst
+ end
+ end,
+ fun(SuccVst0) ->
+ SuccVst1 = update_type(fun meet/2, binary,
+ Src, SuccVst0),
+ SuccVst = prune_x_regs(Live, SuccVst1),
+ extract_term(Type, bs_start_match, [Src], Dst,
+ SuccVst, SuccVst0)
+ end).
%%
%% Common code for validating bs_get* instructions.
%%
-validate_bs_get(Op, Fail, Ctx, Live, Type, Dst, Vst0) ->
- bsm_validate_context(Ctx, Vst0),
- verify_live(Live, Vst0),
- verify_y_init(Vst0),
- Vst1 = prune_x_regs(Live, Vst0),
- Vst = branch_state(Fail, Vst1),
- extract_term(Type, Op, [Ctx], Dst, Vst, Vst0).
+validate_bs_get(Op, Fail, Ctx, Live, Type, Dst, Vst) ->
+ bsm_validate_context(Ctx, Vst),
+ verify_live(Live, Vst),
+ verify_y_init(Vst),
+
+ branch(Fail, Vst,
+ fun(SuccVst0) ->
+ SuccVst = prune_x_regs(Live, SuccVst0),
+ extract_term(Type, Op, [Ctx], Dst, SuccVst, SuccVst0)
+ end).
%%
%% Common code for validating bs_skip_utf* instructions.
%%
-validate_bs_skip_utf(Fail, Ctx, Live, Vst0) ->
- bsm_validate_context(Ctx, Vst0),
- verify_y_init(Vst0),
- verify_live(Live, Vst0),
- Vst = prune_x_regs(Live, Vst0),
- branch_state(Fail, Vst).
+validate_bs_skip_utf(Fail, Ctx, Live, Vst) ->
+ bsm_validate_context(Ctx, Vst),
+ verify_y_init(Vst),
+ verify_live(Live, Vst),
+
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ prune_x_regs(Live, SuccVst)
+ end).
+
+%%
+%% Common code for is_$type instructions.
+%%
+type_test(Fail, Type, Reg, Vst) ->
+ assert_term(Reg, Vst),
+ branch(Fail, Vst,
+ fun(FailVst) ->
+ update_type(fun subtract/2, Type, Reg, FailVst)
+ end,
+ fun(SuccVst) ->
+ update_type(fun meet/2, Type, Reg, SuccVst)
+ end).
%%
%% Special state handling for setelement/3 and set_tuple_element/3 instructions.
@@ -1156,9 +1240,8 @@ verify_local_args(-1, _Lbl, _CtxIds, _Vst) ->
ok;
verify_local_args(X, Lbl, CtxIds, Vst) ->
Reg = {x, X},
- assert_movable(Reg, Vst),
assert_not_fragile(Reg, Vst),
- case get_raw_type(Reg, Vst) of
+ case get_movable_term_type(Reg, Vst) of
#ms{id=Id}=Type ->
case CtxIds of
#{ Id := Other } ->
@@ -1347,7 +1430,7 @@ assert_arities(_) -> error(bad_tuple_arity_list).
%%%
float_op(Ss, Dst, Vst0) ->
- [assert_freg_set(S, Vst0) || S <- Ss],
+ _ = [assert_freg_set(S, Vst0) || S <- Ss],
assert_fls(cleared, Vst0),
Vst = set_fls(cleared, Vst0),
set_freg(Dst, Vst).
@@ -1424,7 +1507,7 @@ bsm_validate_context(Reg, Vst) ->
ok.
bsm_get_context({Kind,_}=Reg, Vst) when Kind =:= x; Kind =:= y->
- case get_raw_type(Reg, Vst) of
+ case get_movable_term_type(Reg, Vst) of
#ms{}=Ctx -> Ctx;
_ -> error({no_bsm_context,Reg})
end;
@@ -1459,44 +1542,46 @@ bsm_restore(Reg, SavePoint, Vst) ->
_ -> error({illegal_restore,SavePoint,range})
end.
-select_val_branches(Fail, Src, Choices, Vst0) ->
- Vst = svb_1(Choices, Src, Vst0),
- kill_state(branch_state(Fail, Vst)).
-
-svb_1([Val,{f,L}|T], Src, Vst0) ->
- Vst = complex_test(L,
- fun(BranchVst) ->
- update_eq_types(Src, Val, BranchVst)
- end,
- fun(FailVst) ->
- update_ne_types(Src, Val, FailVst)
- end, Vst0),
- svb_1(T, Src, Vst);
-svb_1([], _, Vst) ->
- Vst.
-
-select_arity_branches(Fail, List, Tuple, Vst0) ->
- Type = get_term_type(Tuple, Vst0),
- Vst = sab_1(List, Tuple, Type, Vst0),
- kill_state(branch_state(Fail, Vst)).
-
-sab_1([Sz,{f,L}|T], Tuple, {tuple,[_],Es}=Type0, Vst0) ->
- #vst{current=St0} = Vst0,
- Vst1 = update_type(fun meet/2, {tuple,Sz,Es}, Tuple, Vst0),
- Vst2 = branch_state(L, Vst1),
- Vst = Vst2#vst{current=St0},
-
- sab_1(T, Tuple, Type0, Vst);
-sab_1([Sz,{f,L}|T], Tuple, {tuple,Sz,_Es}=Type, Vst0) ->
- %% The type is already correct. (This test is redundant.)
- Vst = branch_state(L, Vst0),
- sab_1(T, Tuple, Type, Vst);
-sab_1([_,{f,_}|T], Tuple, Type, Vst) ->
- %% We already have an established different exact size for the tuple.
- %% This label can't possibly be reached.
- sab_1(T, Tuple, Type, Vst);
-sab_1([], _, _, #vst{}=Vst) ->
- Vst.
+validate_select_val(_Fail, _Choices, _Src, #vst{current=none}=Vst) ->
+ %% We've already branched on all of Src's possible values, so we know we
+ %% can't reach the fail label or any of the remaining choices.
+ Vst;
+validate_select_val(Fail, [Val,{f,L}|T], Src, Vst0) ->
+ Vst = branch(L, Vst0,
+ fun(BranchVst) ->
+ update_eq_types(Src, Val, BranchVst)
+ end,
+ fun(FailVst) ->
+ update_ne_types(Src, Val, FailVst)
+ end),
+ validate_select_val(Fail, T, Src, Vst);
+validate_select_val(Fail, [], _, Vst) ->
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ %% The next instruction is never executed.
+ kill_state(SuccVst)
+ end).
+
+validate_select_tuple_arity(_Fail, _Choices, _Src, #vst{current=none}=Vst) ->
+ %% We've already branched on all of Src's possible values, so we know we
+ %% can't reach the fail label or any of the remaining choices.
+ Vst;
+validate_select_tuple_arity(Fail, [Arity,{f,L}|T], Tuple, Vst0) ->
+ Type = {tuple, Arity, #{}},
+ Vst = branch(L, Vst0,
+ fun(BranchVst) ->
+ update_type(fun meet/2, Type, Tuple, BranchVst)
+ end,
+ fun(FailVst) ->
+ update_type(fun subtract/2, Type, Tuple, FailVst)
+ end),
+ validate_select_tuple_arity(Fail, T, Tuple, Vst);
+validate_select_tuple_arity(Fail, [], _, #vst{}=Vst) ->
+ branch(Fail, Vst,
+ fun(SuccVst) ->
+ %% The next instruction is never executed.
+ kill_state(SuccVst)
+ end).
infer_types({Kind,_}=Reg, Vst) when Kind =:= x; Kind =:= y ->
infer_types(get_reg_vref(Reg, Vst), Vst);
@@ -1628,26 +1713,6 @@ resolve_args([Lit | Args], Vst) ->
resolve_args([], _Vst) ->
[].
-%% Helper functions for tests that alter state on both the success and fail
-%% branches, keeping the states from tainting each other.
-complex_test(Fail, FailFun, SuccFun, Vst0) ->
- #vst{current=St0} = Vst0,
- Vst1 = FailFun(Vst0),
- Vst2 = branch_state(Fail, Vst1),
- Vst = Vst2#vst{current=St0},
- SuccFun(Vst).
-
-%% Helper function for simple "is_type" tests.
-type_test(Fail, Type, Reg, Vst) ->
- assert_term(Reg, Vst),
- complex_test(Fail,
- fun(FailVst) ->
- update_type(fun subtract/2, Type, Reg, FailVst)
- end,
- fun(SuccVst) ->
- update_type(fun meet/2, Type, Reg, SuccVst)
- end, Vst).
-
%% Overrides the type of Reg. This is ugly but a necessity for certain
%% destructive operations.
override_type(Type, Reg, Vst) ->
@@ -1655,7 +1720,7 @@ override_type(Type, Reg, Vst) ->
%% This is used when linear code finds out more and more information about a
%% type, so that the type gets more specialized.
-update_type(Merge, Type0, #value_ref{}=Ref, Vst) ->
+update_type(Merge, With, #value_ref{}=Ref, Vst) ->
%% If the old type can't be merged with the new one, the type information
%% is inconsistent and we know that some instructions will never be
%% executed at run-time. For example:
@@ -1664,21 +1729,47 @@ update_type(Merge, Type0, #value_ref{}=Ref, Vst) ->
%% {test,is_tuple,Fail,[Reg]}.
%% {test,test_arity,Fail,[Reg,5]}.
%%
- %% Note that the test_arity instruction can never be reached, so we use the
- %% new type instead of 'none'.
- Type = case Merge(get_raw_type(Ref, Vst), Type0) of
- none -> Type0;
- T -> T
- end,
- set_type(Type, Ref, Vst);
-update_type(Merge, Type, {Kind,_}=Reg, Vst) when Kind =:= x; Kind =:= y ->
- update_type(Merge, Type, get_reg_vref(Reg, Vst), Vst);
-update_type(_Merge, _Type, Literal, Vst) ->
+ %% Note that the test_arity instruction can never be reached, so we need to
+ %% kill the state to avoid raising an error when we encounter it.
+ %%
+ %% Simply returning `kill_state(Vst)` is unsafe however as we might be in
+ %% the middle of an instruction, and altering the rest of the validator
+ %% (eg. prune_x_regs/2) to no-op on dead states is prone to error.
+ %%
+ %% We therefore throw a 'type_conflict' error instead, which causes
+ %% validation to fail unless we're in a context where such errors can be
+ %% handled, such as in a branch handler.
+ Current = get_raw_type(Ref, Vst),
+ case Merge(Current, With) of
+ none -> throw({type_conflict, Current, With});
+ Type -> set_type(Type, Ref, Vst)
+ end;
+update_type(Merge, With, {Kind,_}=Reg, Vst) when Kind =:= x; Kind =:= y ->
+ update_type(Merge, With, get_reg_vref(Reg, Vst), Vst);
+update_type(Merge, With, Literal, Vst) ->
assert_literal(Literal),
- Vst.
+ %% Literals always retain their type, but we still need to bail on type
+ %% conflicts.
+ case Merge(Literal, With) of
+ none -> throw({type_conflict, Literal, With});
+ _Type -> Vst
+ end.
update_ne_types(LHS, RHS, Vst) ->
- update_type(fun subtract/2, get_term_type(RHS, Vst), LHS, Vst).
+ %% While updating types on equality is fairly straightforward, inequality
+ %% is a bit trickier since all we know is that the *value* of LHS differs
+ %% from RHS, so we can't blindly subtract their types.
+ %%
+ %% Consider `number =/= {integer,[]}`; all we know is that LHS isn't equal
+ %% to some *specific integer* of unknown value, and if we were to subtract
+ %% {integer,[]} we would erroneously infer that the new type is {float,[]}.
+ %%
+ %% Therefore, we only subtract when we know that RHS has a specific value.
+ RType = get_term_type(RHS, Vst),
+ case is_literal(RType) of
+ true -> update_type(fun subtract/2, RType, LHS, Vst);
+ false -> Vst
+ end.
update_eq_types(LHS, RHS, Vst0) ->
Infer = infer_types(LHS, Vst0),
@@ -1786,19 +1877,27 @@ assert_term(Src, Vst) ->
ok.
assert_movable(Src, Vst) ->
- _ = get_move_term_type(Src, Vst),
+ _ = get_movable_term_type(Src, Vst),
ok.
-assert_literal(nil) -> ok;
-assert_literal({atom,A}) when is_atom(A) -> ok;
-assert_literal({float,F}) when is_float(F) -> ok;
-assert_literal({integer,I}) when is_integer(I) -> ok;
-assert_literal({literal,_L}) -> ok;
-assert_literal(T) -> error({literal_required,T}).
+assert_literal(Src) ->
+ case is_literal(Src) of
+ true -> ok;
+ false -> error({literal_required,Src})
+ end.
+
+assert_not_literal(Src) ->
+ case is_literal(Src) of
+ true -> error({literal_not_allowed,Src});
+ false -> ok
+ end.
-assert_not_literal({x,_}) -> ok;
-assert_not_literal({y,_}) -> ok;
-assert_not_literal(Literal) -> error({literal_not_allowed,Literal}).
+is_literal(nil) -> true;
+is_literal({atom,A}) when is_atom(A) -> true;
+is_literal({float,F}) when is_float(F) -> true;
+is_literal({integer,I}) when is_integer(I) -> true;
+is_literal({literal,_L}) -> true;
+is_literal(_) -> false.
%% The possible types.
%%
@@ -2050,6 +2149,7 @@ assert_tuple_elements(Limit, Es) ->
%% Subtract Type2 from Type2. Example:
%% subtract(list, nil) -> cons
+subtract(Same, Same) -> none;
subtract(list, nil) -> cons;
subtract(list, cons) -> nil;
subtract(number, {integer,[]}) -> {float,[]};
@@ -2082,11 +2182,10 @@ assert_type(Needed, Actual) ->
get_element_type(Key, Src, Vst) ->
get_element_type_1(Key, get_term_type(Src, Vst)).
-get_element_type_1({integer,Index}=Key, {tuple,Sz,Es}) ->
+get_element_type_1({integer,_}=Key, {tuple,_Sz,Es}) ->
case Es of
#{ Key := Type } -> Type;
- #{} when Index =< Sz -> term;
- #{} -> none
+ #{} -> term
end;
get_element_type_1(_Index, _Type) ->
term.
@@ -2103,7 +2202,7 @@ get_tuple_size({integer,Sz}) -> Sz;
get_tuple_size(_) -> 0.
validate_src(Ss, Vst) when is_list(Ss) ->
- [assert_term(S, Vst) || S <- Ss],
+ _ = [assert_term(S, Vst) || S <- Ss],
ok.
%% get_term_type(Src, ValidatorState) -> Type
@@ -2111,22 +2210,23 @@ validate_src(Ss, Vst) when is_list(Ss) ->
%% a standard Erlang type (no catch/try tags or match contexts).
get_term_type(Src, Vst) ->
- case get_move_term_type(Src, Vst) of
+ case get_movable_term_type(Src, Vst) of
#ms{} -> error({match_context,Src});
Type -> Type
end.
-%% get_move_term_type(Src, ValidatorState) -> Type
+%% get_movable_term_type(Src, ValidatorState) -> Type
%% Get the type of the source Src. The returned type Type will be
%% a standard Erlang type (no catch/try tags). Match contexts are OK.
-get_move_term_type(Src, Vst) ->
+get_movable_term_type(Src, Vst) ->
case get_raw_type(Src, Vst) of
initialized -> error({unassigned,Src});
uninitialized -> error({uninitialized_reg,Src});
{catchtag,_} -> error({catchtag,Src});
{trytag,_} -> error({trytag,Src});
tuple_in_progress -> error({tuple_in_progress,Src});
+ {literal,_}=Lit -> get_literal_type(Lit);
Type -> Type
end.
@@ -2145,7 +2245,8 @@ get_tag_type(Src, _) ->
error({invalid_tag_register,Src}).
%% get_raw_type(Src, ValidatorState) -> Type
-%% Return the type of a register without doing any validity checks.
+%% Return the type of a register without doing any validity checks or
+%% conversions.
get_raw_type({x,X}=Src, #vst{current=#st{xs=Xs}}=Vst) when is_integer(X) ->
check_limit(Src),
case Xs of
@@ -2192,10 +2293,53 @@ glt_1(T) when is_tuple(T) ->
glt_1(L) ->
{literal, L}.
+%%%
+%%% Branch tracking
+%%%
+
+%% Forks the execution flow, with the provided funs returning the new state of
+%% their respective branch; the "fail" fun returns the state where the branch
+%% is taken, and the "success" fun returns the state where it's not.
+%%
+%% If either path is known not to be taken at runtime (eg. due to a type
+%% conflict), it will simply be discarded.
+-spec branch(Lbl :: label(),
+ Original :: #vst{},
+ FailFun :: BranchFun,
+ SuccFun :: BranchFun) -> #vst{} when
+ BranchFun :: fun((#vst{}) -> #vst{}).
+branch(Lbl, Vst0, FailFun, SuccFun) ->
+ #vst{current=St0} = Vst0,
+ try FailFun(Vst0) of
+ Vst1 ->
+ Vst2 = branch_state(Lbl, Vst1),
+ Vst = Vst2#vst{current=St0},
+ try SuccFun(Vst) of
+ V -> V
+ catch
+ {type_conflict, _, _} ->
+ %% The instruction is guaranteed to fail; kill the state.
+ kill_state(Vst)
+ end
+ catch
+ {type_conflict, _, _} ->
+ %% This instruction is guaranteed not to fail, so we run the
+ %% success branch *without* catching type conflicts to avoid hiding
+ %% errors in the validator itself; one of the branches must
+ %% succeed.
+ SuccFun(Vst0)
+ end.
+
+%% A shorthand version of branch/4 for when the state is only altered on
+%% success.
+branch(Fail, Vst, SuccFun) ->
+ branch(Fail, Vst, fun(V) -> V end, SuccFun).
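To make the intended call pattern concrete, here is a minimal sketch of an instruction handler built on branch/4; the function name and the narrowing steps described in the comments are illustrative assumptions, not actual validator clauses:

%% Illustrative sketch only: an instruction with fail label Lbl forks the
%% state with branch/4. Each fun receives the pre-branch state and returns
%% the state for its outcome.
branch_sketch(Lbl, Vst) ->
    branch(Lbl, Vst,
           fun(FailVst) ->
                   %% The jump to Lbl is taken; a real handler would
                   %% typically narrow types here by subtraction.
                   FailVst
           end,
           fun(SuccVst) ->
                   %% The jump is not taken; a real handler would typically
                   %% narrow the tested source to the tested type here.
                   SuccVst
           end).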
+
+%% Directly branches off the state. This is an "internal" operation that should
+%% be used sparingly.
branch_state(0, #vst{}=Vst) ->
- %% If the instruction fails, the stack may be scanned
- %% looking for a catch tag. Therefore the Y registers
- %% must be initialized at this point.
+ %% If the instruction fails, the stack may be scanned looking for a catch
+ %% tag. Therefore the Y registers must be initialized at this point.
verify_y_init(Vst),
Vst;
branch_state(L, #vst{current=St,branched=B,ref_ctr=Counter0}=Vst) ->
diff --git a/lib/compiler/test/beam_validator_SUITE.erl b/lib/compiler/test/beam_validator_SUITE.erl
index 265da43f9d..8b39fce479 100644
--- a/lib/compiler/test/beam_validator_SUITE.erl
+++ b/lib/compiler/test/beam_validator_SUITE.erl
@@ -34,7 +34,7 @@
undef_label/1,illegal_instruction/1,failing_gc_guard_bif/1,
map_field_lists/1,cover_bin_opt/1,
val_dsetel/1,bad_tuples/1,bad_try_catch_nesting/1,
- receive_stacked/1,aliased_types/1]).
+ receive_stacked/1,aliased_types/1,type_conflict/1]).
-include_lib("common_test/include/ct.hrl").
@@ -63,7 +63,7 @@ groups() ->
undef_label,illegal_instruction,failing_gc_guard_bif,
map_field_lists,cover_bin_opt,val_dsetel,
bad_tuples,bad_try_catch_nesting,
- receive_stacked,aliased_types]}].
+ receive_stacked,aliased_types,type_conflict]}].
init_per_suite(Config) ->
test_lib:recompile(?MODULE),
@@ -156,8 +156,8 @@ call_last(Config) when is_list(Config) ->
merge_undefined(Config) when is_list(Config) ->
Errors = do_val(merge_undefined, Config),
[{{t,handle_call,2},
- {{call_ext,1,{extfunc,erlang,exit,1}},
- 10,
+ {{call_ext,2,{extfunc,debug,filter,2}},
+ 22,
{uninitialized_reg,{y,_}}}}] = Errors,
ok.
@@ -630,6 +630,27 @@ aliased_types_3(Bug) ->
hd(List)
end.
+
+%% ERL-867; validation proceeded after a type conflict, causing incorrect types
+%% to be joined.
+
+-record(r, { e1 = e1, e2 = e2 }).
+
+type_conflict(Config) when is_list(Config) ->
+ {e1, e2} = type_conflict_1(#r{}),
+ ok.
+
+type_conflict_1(C) ->
+ Src = id(C#r.e2),
+ TRes = try id(Src) of
+ R -> R
+ catch
+            %% C:R can never match, yet the validator assumed that the type
+            %% of 'C' was an atom from here on.
+ C:R -> R
+ end,
+ {C#r.e1, TRes}.
+
%%%-------------------------------------------------------------------------
transform_remove(Remove, Module) ->
diff --git a/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S b/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S
index 481d55045d..aa344807e4 100644
--- a/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S
+++ b/lib/compiler/test/beam_validator_SUITE_data/merge_undefined.S
@@ -15,8 +15,9 @@
{select_val,{x,0},{f,1},{list,[{atom,gurka},{f,3},{atom,delete},{f,4}]}}.
{label,3}.
{allocate_heap,2,6,2}.
- %% The Y registers are not initialized here.
{test,is_eq_exact,{f,5},[{x,0},{atom,ok}]}.
+ %% This is unreachable since {x,0} is known not to be 'ok'. We should not
+  %% fail with "uninitialized y registers" on erlang:exit/1.
{move,{atom,nisse},{x,0}}.
{call_ext,1,{extfunc,erlang,exit,1}}.
{label,4}.
@@ -29,6 +30,7 @@
{call_ext,2,{extfunc,io,format,2}}.
{test,is_ne_exact,{f,6},[{x,0},{atom,ok}]}.
{label,5}.
+ %% The Y registers are not initialized here.
{move,{atom,logReader},{x,1}}.
{move,{atom,console},{x,0}}.
{call_ext,2,{extfunc,debug,filter,2}}.
diff --git a/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S b/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S
index 5b974119c6..a878204d16 100644
--- a/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S
+++ b/lib/compiler/test/beam_validator_SUITE_data/receive_stacked.S
@@ -240,7 +240,7 @@
{y,0}}.
{'%',{no_bin_opt,{binary_used_in,{test,is_binary,{f,34},[{y,0}]}},
[63,{file,"receive_stacked.erl"}]}}.
- {test,is_binary,{f,34},[{y,0}]}.
+ {test,is_eq_exact,{f,34},[{y,0},{literal,<<0,1,2,3>>}]}.
remove_message.
{move,{integer,42},{x,0}}.
{line,[{location,"receive_stacked.erl",64}]}.
@@ -283,7 +283,7 @@
{y,0}}.
{'%',{no_bin_opt,{[{x,1},{y,0}],{loop_rec_end,{f,38}},not_handled},
[70,{file,"receive_stacked.erl"}]}}.
- {test,is_binary,{f,39},[{x,0}]}.
+ {test,is_eq_exact,{f,39},[{x,0},{literal,<<0,1,2,3>>}]}.
remove_message.
{move,{integer,42},{x,0}}.
{line,[{location,"receive_stacked.erl",71}]}.
diff --git a/lib/erl_interface/configure.in b/lib/erl_interface/configure.in
index 53c2f7069c..67acb7c180 100644
--- a/lib/erl_interface/configure.in
+++ b/lib/erl_interface/configure.in
@@ -188,7 +188,7 @@ AC_MSG_CHECKING([for deprecated attribute])
AC_TRY_COMPILE([],
[void my_function(void) __attribute__((deprecated));],
[AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_DEPRECATED_ATTRIBUTE, [], [Define to 1 if you have the `deprecated' attribute])],
+ AC_DEFINE(HAVE_DEPRECATED_ATTRIBUTE, [1], [Define to 1 if you have the `deprecated' attribute])],
[AC_MSG_RESULT(no)])
diff --git a/lib/erl_interface/src/Makefile b/lib/erl_interface/src/Makefile
index 800557fbfe..00c49f1622 100644
--- a/lib/erl_interface/src/Makefile
+++ b/lib/erl_interface/src/Makefile
@@ -27,7 +27,9 @@ include $(ERL_TOP)/make/output.mk
include $(ERL_TOP)/make/target.mk
debug opt shared purify quantify purecov gcov:
+ifndef TERTIARY_BOOTSTRAP
$(make_verbose)$(MAKE) -f $(TARGET)/Makefile TYPE=$@
+endif
clean depend docs release release_docs tests release_tests check xmllint:
$(make_verbose)$(MAKE) -f $(TARGET)/Makefile $@
diff --git a/lib/ftp/src/ftp.erl b/lib/ftp/src/ftp.erl
index 40f6b53fa3..18cd8c7524 100644
--- a/lib/ftp/src/ftp.erl
+++ b/lib/ftp/src/ftp.erl
@@ -1065,9 +1065,9 @@ handle_call({_, {open, ip_comm, Opts, {CtrlOpts, DataPassOpts, DataActOpts}}}, F
end
end;
-handle_call({_, {open, tls_upgrade, TLSOptions}}, From, State) ->
- _ = send_ctrl_message(State, mk_cmd("AUTH TLS", [])),
- activate_ctrl_connection(State),
+handle_call({_, {open, tls_upgrade, TLSOptions}}, From, State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("AUTH TLS", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From, caller = open, tls_options = TLSOptions}};
handle_call({_, {user, User, Password}}, From,
@@ -1081,17 +1081,17 @@ handle_call({_, {user, User, Password, Acc}}, From,
handle_call({_, {account, Acc}}, From, State)->
handle_user_account(Acc, State#state{client = From});
-handle_call({_, pwd}, From, #state{chunk = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd("PWD", [])),
- activate_ctrl_connection(State),
+handle_call({_, pwd}, From, #state{chunk = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("PWD", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From, caller = pwd}};
handle_call({_, lpwd}, From, #state{ldir = LDir} = State) ->
{reply, {ok, LDir}, State#state{client = From}};
-handle_call({_, {cd, Dir}}, From, #state{chunk = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd("CWD ~s", [Dir])),
- activate_ctrl_connection(State),
+handle_call({_, {cd, Dir}}, From, #state{chunk = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("CWD ~s", [Dir])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From, caller = cd}};
handle_call({_,{lcd, Dir}}, _From, #state{ldir = LDir0} = State) ->
@@ -1108,41 +1108,41 @@ handle_call({_, {dir, Len, Dir}}, {_Pid, _} = From,
setup_data_connection(State#state{caller = {dir, Dir, Len},
client = From});
handle_call({_, {rename, CurrFile, NewFile}}, From,
- #state{chunk = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd("RNFR ~s", [CurrFile])),
- activate_ctrl_connection(State),
+ #state{chunk = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("RNFR ~s", [CurrFile])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {rename, NewFile}, client = From}};
handle_call({_, {delete, File}}, {_Pid, _} = From,
- #state{chunk = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd("DELE ~s", [File])),
- activate_ctrl_connection(State),
+ #state{chunk = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("DELE ~s", [File])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From}};
-handle_call({_, {mkdir, Dir}}, From, #state{chunk = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd("MKD ~s", [Dir])),
- activate_ctrl_connection(State),
+handle_call({_, {mkdir, Dir}}, From, #state{chunk = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("MKD ~s", [Dir])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From}};
-handle_call({_,{rmdir, Dir}}, From, #state{chunk = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd("RMD ~s", [Dir])),
- activate_ctrl_connection(State),
+handle_call({_,{rmdir, Dir}}, From, #state{chunk = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("RMD ~s", [Dir])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From}};
-handle_call({_,{type, Type}}, From, #state{chunk = false} = State) ->
+handle_call({_,{type, Type}}, From, #state{chunk = false} = State0) ->
case Type of
ascii ->
- _ = send_ctrl_message(State, mk_cmd("TYPE A", [])),
- activate_ctrl_connection(State),
+ _ = send_ctrl_message(State0, mk_cmd("TYPE A", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = type, type = ascii,
client = From}};
binary ->
- _ = send_ctrl_message(State, mk_cmd("TYPE I", [])),
- activate_ctrl_connection(State),
+ _ = send_ctrl_message(State0, mk_cmd("TYPE I", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = type, type = binary,
client = From}};
_ ->
- {reply, {error, etype}, State}
+ {reply, {error, etype}, State0}
end;
handle_call({_,{recv, RemoteFile, LocalFile}}, From,
@@ -1181,8 +1181,8 @@ handle_call({_, recv_chunk}, _From, #state{chunk = true,
} = State0) ->
%% The ftp:recv_chunk call was the last event we waited for; finish and clean up.
?DBG("recv_chunk_closing ftp:recv_chunk, last event",[]),
- activate_ctrl_connection(State0),
- {reply, ok, State0#state{caller = undefined,
+ State = activate_ctrl_connection(State0),
+ {reply, ok, State#state{caller = undefined,
chunk = false,
client = undefined}};
@@ -1238,18 +1238,18 @@ handle_call({_, {transfer_chunk, Bin}}, _, #state{chunk = true} = State) ->
handle_call({_, {transfer_chunk, _}}, _, #state{chunk = false} = State) ->
{reply, {error, echunk}, State};
-handle_call({_, chunk_end}, From, #state{chunk = true} = State) ->
- close_data_connection(State),
- activate_ctrl_connection(State),
+handle_call({_, chunk_end}, From, #state{chunk = true} = State0) ->
+ close_data_connection(State0),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From, dsock = undefined,
caller = end_chunk_transfer, chunk = false}};
handle_call({_, chunk_end}, _, #state{chunk = false} = State) ->
{reply, {error, echunk}, State};
-handle_call({_, {quote, Cmd}}, From, #state{chunk = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd(Cmd, [])),
- activate_ctrl_connection(State),
+handle_call({_, {quote, Cmd}}, From, #state{chunk = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd(Cmd, [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{client = From, caller = quote}};
handle_call({_, _Req}, _From, #state{csock = CSock} = State)
@@ -1325,38 +1325,38 @@ handle_info({Trpt, Socket, Data}, #state{dsock = {Trpt,Socket}} = State0) when T
Data/binary>>}};
handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket},
- caller = {recv_file, Fd}} = State)
+ caller = {recv_file, Fd}} = State0)
when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
file_close(Fd),
- progress_report({transfer_size, 0}, State),
- activate_ctrl_connection(State),
+ progress_report({transfer_size, 0}, State0),
+ State = activate_ctrl_connection(State0),
?DBG("Data channel close",[]),
{noreply, State#state{dsock = undefined, data = <<>>}};
handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket},
client = Client,
- caller = recv_chunk} = State)
+ caller = recv_chunk} = State0)
when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
?DBG("Data channel close recv_chunk",[]),
- activate_ctrl_connection(State),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{dsock = undefined,
caller = #recv_chunk_closing{dconn_closed = true,
client_called_us = Client =/= undefined}
}};
handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket}, caller = recv_bin,
- data = Data} = State)
+ data = Data} = State0)
when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
?DBG("Data channel close",[]),
- activate_ctrl_connection(State),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{dsock = undefined, data = <<>>,
caller = {recv_bin, Data}}};
handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket}, data = Data,
- caller = {handle_dir_result, Dir}}
- = State) when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
+ caller = {handle_dir_result, Dir}}
+ = State0) when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
?DBG("Data channel close",[]),
- activate_ctrl_connection(State),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{dsock = undefined,
caller = {handle_dir_result, Dir, Data},
% data = <<?CR,?LF>>}};
@@ -1377,7 +1377,7 @@ handle_info({Transport, Socket, Data}, #state{csock = {Transport, Socket},
client = From,
ctrl_data = {CtrlData, AccLines,
LineStatus}}
- = State) ->
+ = State0) ->
?DBG('--ctrl ~p ----> ~s~p~n',[Socket,<<CtrlData/binary, Data/binary>>,State]),
case ftp_response:parse_lines(<<CtrlData/binary, Data/binary>>,
AccLines, LineStatus) of
@@ -1387,21 +1387,21 @@ handle_info({Transport, Socket, Data}, #state{csock = {Transport, Socket},
case Caller of
quote ->
gen_server:reply(From, string:tokens(Lines, [?CR, ?LF])),
- {noreply, State#state{client = undefined,
- caller = undefined,
- latest_ctrl_response = Lines,
- ctrl_data = {NextMsgData, [],
- start}}};
+ {noreply, State0#state{client = undefined,
+ caller = undefined,
+ latest_ctrl_response = Lines,
+ ctrl_data = {NextMsgData, [],
+ start}}};
_ ->
?DBG(' ...handle_ctrl_result(~p,...) ctrl_data=~p~n',[CtrlResult,{NextMsgData, [], start}]),
handle_ctrl_result(CtrlResult,
- State#state{latest_ctrl_response = Lines,
- ctrl_data =
- {NextMsgData, [], start}})
+ State0#state{latest_ctrl_response = Lines,
+ ctrl_data =
+ {NextMsgData, [], start}})
end;
{continue, NewCtrlData} ->
?DBG(' ...Continue... ctrl_data=~p~n',[NewCtrlData]),
- activate_ctrl_connection(State),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{ctrl_data = NewCtrlData}}
end;
@@ -1567,19 +1567,19 @@ start_link(Opts, GenServerOptions) ->
%%% Help functions to handle_call and/or handle_ctrl_result
%%--------------------------------------------------------------------------
%% User handling
-handle_user(User, Password, Acc, State) ->
- _ = send_ctrl_message(State, mk_cmd("USER ~s", [User])),
- activate_ctrl_connection(State),
+handle_user(User, Password, Acc, State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("USER ~s", [User])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {handle_user, Password, Acc}}}.
-handle_user_passwd(Password, Acc, State) ->
- _ = send_ctrl_message(State, mk_cmd("PASS ~s", [Password])),
- activate_ctrl_connection(State),
+handle_user_passwd(Password, Acc, State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("PASS ~s", [Password])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {handle_user_passwd, Acc}}}.
-handle_user_account(Acc, State) ->
- _ = send_ctrl_message(State, mk_cmd("ACCT ~s", [Acc])),
- activate_ctrl_connection(State),
+handle_user_account(Acc, State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("ACCT ~s", [Acc])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = handle_user_account}}.
@@ -1594,9 +1594,9 @@ handle_ctrl_result({tls_upgrade, _}, #state{csock = {tcp, Socket},
?DBG('<--ctrl ssl:connect(~p, ~p)~n~p~n',[Socket,TLSOptions,State0]),
case ssl:connect(Socket, TLSOptions, Timeout) of
{ok, TLSSocket} ->
- State = State0#state{csock = {ssl,TLSSocket}},
- _ = send_ctrl_message(State, mk_cmd("PBSZ 0", [])),
- activate_ctrl_connection(State),
+ State1 = State0#state{csock = {ssl,TLSSocket}},
+ _ = send_ctrl_message(State1, mk_cmd("PBSZ 0", [])),
+ State = activate_ctrl_connection(State1),
{noreply, State#state{tls_upgrading_data_connection = {true, pbsz}} };
{error, _} = Error ->
gen_server:reply(From, {Error, self()}),
@@ -1605,9 +1605,9 @@ handle_ctrl_result({tls_upgrade, _}, #state{csock = {tcp, Socket},
tls_upgrading_data_connection = false}}
end;
-handle_ctrl_result({pos_compl, _}, #state{tls_upgrading_data_connection = {true, pbsz}} = State) ->
- _ = send_ctrl_message(State, mk_cmd("PROT P", [])),
- activate_ctrl_connection(State),
+handle_ctrl_result({pos_compl, _}, #state{tls_upgrading_data_connection = {true, pbsz}} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("PROT P", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{tls_upgrading_data_connection = {true, prot}}};
handle_ctrl_result({pos_compl, _}, #state{tls_upgrading_data_connection = {true, prot},
@@ -1801,10 +1801,10 @@ handle_ctrl_result({pos_compl, _}, #state{caller = {handle_dir_result, Dir,
handle_ctrl_result({pos_compl, Lines},
#state{caller = {handle_dir_data, Dir, DirData}} =
- State) ->
+ State0) ->
OldDir = pwd_result(Lines),
- _ = send_ctrl_message(State, mk_cmd("CWD ~s", [Dir])),
- activate_ctrl_connection(State),
+ _ = send_ctrl_message(State0, mk_cmd("CWD ~s", [Dir])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {handle_dir_data_second_phase, OldDir,
DirData}}};
handle_ctrl_result({Status, _},
@@ -1818,9 +1818,9 @@ handle_ctrl_result(S={_Status, _},
handle_ctrl_result({pos_compl, _},
#state{caller = {handle_dir_data_second_phase, OldDir,
- DirData}} = State) ->
- _ = send_ctrl_message(State, mk_cmd("CWD ~s", [OldDir])),
- activate_ctrl_connection(State),
+ DirData}} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("CWD ~s", [OldDir])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {handle_dir_data_third_phase, DirData}}};
handle_ctrl_result({Status, _},
#state{caller = {handle_dir_data_second_phase, _, _}}
@@ -1840,9 +1840,9 @@ handle_ctrl_result(Status={epath, _}, #state{caller = {dir,_}} = State) ->
%%--------------------------------------------------------------------------
%% File renaming
handle_ctrl_result({pos_interm, _}, #state{caller = {rename, NewFile}}
- = State) ->
- _ = send_ctrl_message(State, mk_cmd("RNTO ~s", [NewFile])),
- activate_ctrl_connection(State),
+ = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("RNTO ~s", [NewFile])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = rename_second_phase}};
handle_ctrl_result({Status, _},
@@ -1916,10 +1916,10 @@ handle_ctrl_result({pos_compl, _}, #state{client = From,
%% The pos_compl was the last event we waited for; finish and clean up.
?DBG("recv_chunk_closing pos_compl, last event",[]),
gen_server:reply(From, ok),
- activate_ctrl_connection(State0),
- {noreply, State0#state{caller = undefined,
- chunk = false,
- client = undefined}};
+ State = activate_ctrl_connection(State0),
+ {noreply, State#state{caller = undefined,
+ chunk = false,
+ client = undefined}};
handle_ctrl_result({pos_compl, _}, #state{caller = #recv_chunk_closing{}=R}
= State0) ->
@@ -2013,71 +2013,71 @@ ctrl_result_response(_, #state{client = From} = State, ErrorMsg) ->
{noreply, State#state{client = undefined, caller = undefined}}.
%%--------------------------------------------------------------------------
-handle_caller(#state{caller = {dir, Dir, Len}} = State) ->
+handle_caller(#state{caller = {dir, Dir, Len}} = State0) ->
Cmd = case Len of
short -> "NLST";
long -> "LIST"
end,
_ = case Dir of
"" ->
- send_ctrl_message(State, mk_cmd(Cmd, ""));
+ send_ctrl_message(State0, mk_cmd(Cmd, ""));
_ ->
- send_ctrl_message(State, mk_cmd(Cmd ++ " ~s", [Dir]))
+ send_ctrl_message(State0, mk_cmd(Cmd ++ " ~s", [Dir]))
end,
- activate_ctrl_connection(State),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {dir, Dir}}};
-handle_caller(#state{caller = {recv_bin, RemoteFile}} = State) ->
- _ = send_ctrl_message(State, mk_cmd("RETR ~s", [RemoteFile])),
- activate_ctrl_connection(State),
+handle_caller(#state{caller = {recv_bin, RemoteFile}} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("RETR ~s", [RemoteFile])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = recv_bin}};
handle_caller(#state{caller = {start_chunk_transfer, Cmd, RemoteFile}} =
- State) ->
- _ = send_ctrl_message(State, mk_cmd("~s ~s", [Cmd, RemoteFile])),
- activate_ctrl_connection(State),
+ State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("~s ~s", [Cmd, RemoteFile])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = start_chunk_transfer}};
-handle_caller(#state{caller = {recv_file, RemoteFile, Fd}} = State) ->
- _ = send_ctrl_message(State, mk_cmd("RETR ~s", [RemoteFile])),
- activate_ctrl_connection(State),
+handle_caller(#state{caller = {recv_file, RemoteFile, Fd}} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("RETR ~s", [RemoteFile])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {recv_file, Fd}}};
handle_caller(#state{caller = {transfer_file, {Cmd, LocalFile, RemoteFile}},
- ldir = LocalDir, client = From} = State) ->
+ ldir = LocalDir, client = From} = State0) ->
case file_open(filename:absname(LocalFile, LocalDir), read) of
{ok, Fd} ->
- _ = send_ctrl_message(State, mk_cmd("~s ~s", [Cmd, RemoteFile])),
- activate_ctrl_connection(State),
+ _ = send_ctrl_message(State0, mk_cmd("~s ~s", [Cmd, RemoteFile])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {transfer_file, Fd}}};
{error, _} ->
gen_server:reply(From, {error, epath}),
- {noreply, State#state{client = undefined, caller = undefined,
- dsock = undefined}}
+ {noreply, State0#state{client = undefined, caller = undefined,
+ dsock = undefined}}
end;
handle_caller(#state{caller = {transfer_data, {Cmd, Bin, RemoteFile}}} =
- State) ->
- _ = send_ctrl_message(State, mk_cmd("~s ~s", [Cmd, RemoteFile])),
- activate_ctrl_connection(State),
+ State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("~s ~s", [Cmd, RemoteFile])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {transfer_data, Bin}}}.
%% ----------- FTP SERVER COMMUNICATION -------------------------
%% Connect to FTP server at Host (default is TCP port 21)
%% in order to establish a control connection.
-setup_ctrl_connection(Host, Port, Timeout, #state{sockopts_ctrl = SockOpts} = State) ->
+setup_ctrl_connection(Host, Port, Timeout, #state{sockopts_ctrl = SockOpts} = State0) ->
MsTime = erlang:monotonic_time(),
- case connect(Host, Port, SockOpts, Timeout, State) of
+ case connect(Host, Port, SockOpts, Timeout, State0) of
{ok, IpFam, CSock} ->
- NewState = State#state{csock = {tcp, CSock}, ipfamily = IpFam},
- activate_ctrl_connection(NewState),
+ State1 = State0#state{csock = {tcp, CSock}, ipfamily = IpFam},
+ State = activate_ctrl_connection(State1),
case Timeout - millisec_passed(MsTime) of
Timeout2 when (Timeout2 >= 0) ->
- {ok, NewState#state{caller = open}, Timeout2};
+ {ok, State#state{caller = open}, Timeout2};
_ ->
%% Oops: simulate timeout.
- {ok, NewState#state{caller = open}, 0}
+ {ok, State#state{caller = open}, 0}
end;
Error ->
Error
@@ -2087,7 +2087,7 @@ setup_data_connection(#state{mode = active,
caller = Caller,
csock = CSock,
sockopts_data_active = SockOpts,
- ftp_extension = FtpExt} = State) ->
+ ftp_extension = FtpExt} = State0) ->
case (catch sockname(CSock)) of
{ok, {{_, _, _, _, _, _, _, _} = IP0, _}} ->
IP = proplists:get_value(ip, SockOpts, IP0),
@@ -2098,8 +2098,8 @@ setup_data_connection(#state{mode = active,
{ok, {_, Port}} = sockname({tcp,LSock}),
IpAddress = inet_parse:ntoa(IP),
Cmd = mk_cmd("EPRT |2|~s|~p|", [IpAddress, Port]),
- _ = send_ctrl_message(State, Cmd),
- activate_ctrl_connection(State),
+ _ = send_ctrl_message(State0, Cmd),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {setup_data_connection,
{LSock, Caller}}}};
{ok, {{_,_,_,_} = IP0, _}} ->
@@ -2112,37 +2112,37 @@ setup_data_connection(#state{mode = active,
false ->
{IP1, IP2, IP3, IP4} = IP,
{Port1, Port2} = {Port div 256, Port rem 256},
- send_ctrl_message(State,
+ send_ctrl_message(State0,
mk_cmd("PORT ~w,~w,~w,~w,~w,~w",
[IP1, IP2, IP3, IP4, Port1, Port2]));
true ->
IpAddress = inet_parse:ntoa(IP),
Cmd = mk_cmd("EPRT |1|~s|~p|", [IpAddress, Port]),
- send_ctrl_message(State, Cmd)
+ send_ctrl_message(State0, Cmd)
end,
- activate_ctrl_connection(State),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {setup_data_connection,
{LSock, Caller}}}}
end;
setup_data_connection(#state{mode = passive, ipfamily = inet6,
- caller = Caller} = State) ->
- _ = send_ctrl_message(State, mk_cmd("EPSV", [])),
- activate_ctrl_connection(State),
+ caller = Caller} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("EPSV", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {setup_data_connection, Caller}}};
setup_data_connection(#state{mode = passive, ipfamily = inet,
caller = Caller,
- ftp_extension = false} = State) ->
- _ = send_ctrl_message(State, mk_cmd("PASV", [])),
- activate_ctrl_connection(State),
+ ftp_extension = false} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("PASV", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {setup_data_connection, Caller}}};
setup_data_connection(#state{mode = passive, ipfamily = inet,
caller = Caller,
- ftp_extension = true} = State) ->
- _ = send_ctrl_message(State, mk_cmd("EPSV", [])),
- activate_ctrl_connection(State),
+ ftp_extension = true} = State0) ->
+ _ = send_ctrl_message(State0, mk_cmd("EPSV", [])),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = {setup_data_connection, Caller}}}.
connect(Host, Port, SockOpts, Timeout, #state{ipfamily = inet = IpFam}) ->
@@ -2248,14 +2248,15 @@ send_message({tcp, Socket}, Message) ->
send_message({ssl, Socket}, Message) ->
ssl:send(Socket, Message).
-activate_ctrl_connection(#state{csock = CSock, ctrl_data = {<<>>, _, _}}) ->
- activate_connection(CSock);
-activate_ctrl_connection(#state{csock = CSock}) ->
+activate_ctrl_connection(#state{csock = CSock, ctrl_data = {<<>>, _, _}} = State) ->
+ activate_connection(CSock),
+ State;
+activate_ctrl_connection(#state{csock = CSock} = State0) ->
activate_connection(CSock),
%% We have already received at least part of the next control message;
%% it has been saved in ctrl_data, so process it first.
- self() ! {socket_type(CSock), unwrap_socket(CSock), <<>>},
- ok.
+ {noreply, State} = handle_info({socket_type(CSock), unwrap_socket(CSock), <<>>}, State0),
+ State.
activate_data_connection(#state{dsock = DSock} = State) ->
activate_connection(DSock),
@@ -2290,22 +2291,22 @@ close_connection({ssl, Socket}) -> ignore_return_value( ssl:close(Socket) ).
%% ------------ FILE HANDLING ----------------------------------------
send_file(#state{tls_upgrading_data_connection = {true, CTRL, _}} = State, Fd) ->
{noreply, State#state{tls_upgrading_data_connection = {true, CTRL, ?MODULE, send_file, Fd}}};
-send_file(State, Fd) ->
+send_file(State0, Fd) ->
case file_read(Fd) of
{ok, N, Bin} when N > 0 ->
- send_data_message(State, Bin),
- progress_report({binary, Bin}, State),
- send_file(State, Fd);
+ send_data_message(State0, Bin),
+ progress_report({binary, Bin}, State0),
+ send_file(State0, Fd);
{ok, _, _} ->
file_close(Fd),
- close_data_connection(State),
- progress_report({transfer_size, 0}, State),
- activate_ctrl_connection(State),
+ close_data_connection(State0),
+ progress_report({transfer_size, 0}, State0),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = transfer_file_second_phase,
dsock = undefined}};
{error, Reason} ->
- gen_server:reply(State#state.client, {error, Reason}),
- {stop, normal, State#state{client = undefined}}
+ gen_server:reply(State0#state.client, {error, Reason}),
+ {stop, normal, State0#state{client = undefined}}
end.
file_open(File, Option) ->
@@ -2347,10 +2348,10 @@ cast(GenServer, Msg) ->
send_bin(#state{tls_upgrading_data_connection = {true, CTRL, _}} = State, Bin) ->
State#state{tls_upgrading_data_connection = {true, CTRL, ?MODULE, send_bin, Bin}};
-send_bin(State, Bin) ->
- send_data_message(State, Bin),
- close_data_connection(State),
- activate_ctrl_connection(State),
+send_bin(State0, Bin) ->
+ send_data_message(State0, Bin),
+ close_data_connection(State0),
+ State = activate_ctrl_connection(State0),
{noreply, State#state{caller = transfer_data_second_phase,
dsock = undefined}}.
diff --git a/lib/ftp/test/ftp_SUITE.erl b/lib/ftp/test/ftp_SUITE.erl
index 7c87d5cbdb..81147b5821 100644
--- a/lib/ftp/test/ftp_SUITE.erl
+++ b/lib/ftp/test/ftp_SUITE.erl
@@ -669,9 +669,9 @@ recv_chunk(Config0) ->
Contents = list_to_binary( lists:duplicate(1000, lists:seq(0,255)) ),
Config = set_state([reset, {mkfile,File,Contents}], Config0),
Pid = proplists:get_value(ftp, Config),
- {{error, "ftp:recv_chunk_start/2 not called"},_} = recv_chunk(Pid, <<>>),
+ {error, "ftp:recv_chunk_start/2 not called"} = do_recv_chunk(Pid),
ok = ftp:recv_chunk_start(Pid, id2ftp(File,Config)),
- {ok, ReceivedContents, _Ncunks} = recv_chunk(Pid, <<>>),
+ {ok, ReceivedContents} = do_recv_chunk(Pid),
find_diff(ReceivedContents, Contents).
recv_chunk_twice() ->
@@ -683,11 +683,11 @@ recv_chunk_twice(Config0) ->
Contents2 = crypto:strong_rand_bytes(1200),
Config = set_state([reset, {mkfile,File1,Contents1}, {mkfile,File2,Contents2}], Config0),
Pid = proplists:get_value(ftp, Config),
- {{error, "ftp:recv_chunk_start/2 not called"},_} = recv_chunk(Pid, <<>>),
+ {error, "ftp:recv_chunk_start/2 not called"} = do_recv_chunk(Pid),
ok = ftp:recv_chunk_start(Pid, id2ftp(File1,Config)),
- {ok, ReceivedContents1, _Ncunks1} = recv_chunk(Pid, <<>>),
+ {ok, ReceivedContents1} = do_recv_chunk(Pid),
ok = ftp:recv_chunk_start(Pid, id2ftp(File2,Config)),
- {ok, ReceivedContents2, _Ncunks2} = recv_chunk(Pid, <<>>),
+ {ok, ReceivedContents2} = do_recv_chunk(Pid),
find_diff(ReceivedContents1, Contents1),
find_diff(ReceivedContents2, Contents2).
@@ -704,47 +704,50 @@ recv_chunk_three_times(Config0) ->
Config = set_state([reset, {mkfile,File1,Contents1}, {mkfile,File2,Contents2}, {mkfile,File3,Contents3}], Config0),
Pid = proplists:get_value(ftp, Config),
- {{error, "ftp:recv_chunk_start/2 not called"},_} = recv_chunk(Pid, <<>>),
+ {error, "ftp:recv_chunk_start/2 not called"} = do_recv_chunk(Pid),
+ ok = ftp:recv_chunk_start(Pid, id2ftp(File3,Config)),
+ {ok, ReceivedContents3} = do_recv_chunk(Pid),
+
ok = ftp:recv_chunk_start(Pid, id2ftp(File1,Config)),
- {ok, ReceivedContents1, Nchunks1} = recv_chunk(Pid, <<>>),
+ {ok, ReceivedContents1} = do_recv_chunk(Pid),
ok = ftp:recv_chunk_start(Pid, id2ftp(File2,Config)),
- {ok, ReceivedContents2, _Nchunks2} = recv_chunk(Pid, <<>>),
-
- ok = ftp:recv_chunk_start(Pid, id2ftp(File3,Config)),
- {ok, ReceivedContents3, _Nchunks3} = recv_chunk(Pid, <<>>, 10000, 0, Nchunks1),
+ {ok, ReceivedContents2} = do_recv_chunk(Pid),
find_diff(ReceivedContents1, Contents1),
find_diff(ReceivedContents2, Contents2),
find_diff(ReceivedContents3, Contents3).
-
+do_recv_chunk(Pid) ->
+ recv_chunk(Pid, <<>>).
recv_chunk(Pid, Acc) ->
- recv_chunk(Pid, Acc, 0, 0, undefined).
-
-
-
-%% ExpectNchunks :: integer() | undefined
-recv_chunk(Pid, Acc, DelayMilliSec, N, ExpectNchunks) when N+1 < ExpectNchunks ->
- %% for all I in integer(), I < undefined
- recv_chunk1(Pid, Acc, DelayMilliSec, N, ExpectNchunks);
-
-recv_chunk(Pid, Acc, DelayMilliSec, N, ExpectNchunks) ->
- %% N >= ExpectNchunks-1
- timer:sleep(DelayMilliSec),
- recv_chunk1(Pid, Acc, DelayMilliSec, N, ExpectNchunks).
-
-
-recv_chunk1(Pid, Acc, DelayMilliSec, N, ExpectNchunks) ->
- ct:log("Call ftp:recv_chunk",[]),
case ftp:recv_chunk(Pid) of
- ok -> {ok, Acc, N};
- {ok, Bin} -> recv_chunk(Pid, <<Acc/binary, Bin/binary>>, DelayMilliSec, N+1, ExpectNchunks);
- Error -> {Error, N}
+ ok ->
+ {ok, Acc};
+ {ok, Bin} ->
+ recv_chunk(Pid, <<Acc/binary, Bin/binary>>);
+ Error ->
+ Error
end.
+%% TODO: Make a new test case that uses this (or new code);
+%% only test one thing at a time.
+%% delay_recv_chunk(Pid) ->
+%% delay_recv_chunk(Pid, <<>>).
+%% delay_recv_chunk(Pid, Acc) ->
+%% ct:pal("FOO ~p", [byte_size(Acc)]),
+%% case ftp:recv_chunk(Pid) of
+%% ok ->
+%% {ok, Acc};
+%% {ok, Bin} ->
+%% ct:sleep(100),
+%% delay_recv_chunk(Pid, <<Acc/binary, Bin/binary>>);
+%% Error ->
+%% Error
+%% end.
+
%%-------------------------------------------------------------------------
type() ->
[{doc,"Test that we can change btween ASCCI and binary transfer mode"}].
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 15dbdb47dc..dbd83e1a6e 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -510,11 +510,13 @@ MaxT = TickTime + TickTime / 4</code>
parameters for Logger are not set.</p>
<taglist>
<tag><c>error_logger</c></tag>
- <item>Replaced by setting the type of the default
- <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso>
- to the same value. Example:
+ <item>Replaced by setting the <seealso
+ marker="logger_std_h#type"><c>type</c></seealso>, and possibly
+ <seealso marker="logger_std_h#file"><c>file</c></seealso> and
+ <seealso marker="logger_std_h#modes"><c>modes</c></seealso>
+ parameters of the default <c>logger_std_h</c> handler. Example:
<code type="none">
-erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{type=>{file,"/tmp/erlang.log"}}}}]'
+erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{file=>"/tmp/erlang.log"}}}]'
</code>
</item>
<tag><c>error_logger_format_depth</c></tag>
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index e6448e144e..ebebcaa1ae 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -66,7 +66,7 @@ logger:error("error happened because: ~p", [Reason]). % Without macro
[{kernel,
[{logger,
[{handler, default, logger_std_h,
- #{config => #{type => {file, "path/to/file.log"}}}}]}]}].
+ #{config => #{file => "path/to/file.log"}}}]}]}].
</code>
<p>
For more information about:
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 03b9edcf8f..8458ffa042 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -801,7 +801,7 @@ logger:debug(#{got => connection_request, id => Id, state => State},
[{kernel,
[{logger,
[{handler, default, logger_std_h, % {handler, HandlerId, Module,
- #{config => #{type => {file,"log/erlang.log"}}}} % Config}
+ #{config => #{file => "log/erlang.log"}}} % Config}
]}]}].
</code>
<p>Modify the default handler to print each log event as a
@@ -831,10 +831,10 @@ logger:debug(#{got => connection_request, id => Id, state => State},
[{logger,
[{handler, default, logger_std_h,
#{level => error,
- config => #{type => {file, "log/erlang.log"}}}},
+ config => #{file => "log/erlang.log"}}},
{handler, info, logger_std_h,
#{level => debug,
- config => #{type => {file, "log/debug.log"}}}}
+ config => #{file => "log/debug.log"}}}
]}]}].
</code>
</section>
@@ -1004,10 +1004,10 @@ ok</pre>
<p>Then, add a new handler which prints to file. You can use the
handler
module <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>,
- and specify type <c>{file,File}</c>.:</p>
+ and configure it to log to file:</p>
<pre>
-4> <input>Config = #{config => #{type => {file,"./info.log"}}, level => info}.</input>
-#{config => #{type => {file,"./info.log"}},level => info}
+4> <input>Config = #{config => #{file => "./info.log"}, level => info}.</input>
+#{config => #{file => "./info.log"},level => info}
5> <input>logger:add_handler(myhandler, logger_std_h, Config).</input>
ok</pre>
<p>Since <c>filter_default</c> defaults to <c>log</c>, this
@@ -1246,7 +1246,7 @@ do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) ->
<p>A configuration example:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{config => #{type => {file,"./system_info.log"},
+ #{config => #{file => "./system_info.log",
sync_mode_qlen => 100,
drop_mode_qlen => 1000,
flush_qlen => 2000}}).
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index fcd180abd6..5ed1a2f210 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -55,30 +55,105 @@
is stored in a sub map with the key <c>config</c>, and can contain the
following parameters:</p>
<taglist>
- <tag><marker id="type"/><c>type</c></tag>
+ <tag><marker id="type"/><c>type = standard_io | standard_error | file</c></tag>
<item>
- <p>This has the value <c>standard_io</c>, <c>standard_error</c>,
- <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>.</p>
- <p>If <c>LogFileOpts</c> is specified, it replaces the default
- list of options used when opening the log file. The default
- list is <c>[raw,append,delayed_write]</c>. One reason to do
- so can be to change <c>append</c> to, for
- example, <c>write</c>, ensuring that the old log is
- truncated when a node is restarted. See the reference manual
- for <seealso marker="file#open-2"><c>file:open/2</c></seealso>
- for more information about file options.</p>
+ <p>Specifies the log destination.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to <c>standard_io</c>, unless
+ parameter <seealso marker="#file"><c>file</c></seealso> is
+ given, in which case it defaults to <c>file</c>.</p>
+ </item>
+ <tag><marker id="file"/><c>file = </c><seealso marker="file#type-filename"><c>file:filename()</c></seealso></tag>
+ <item>
+ <p>This specifies the name of the log file when the handler is
+ of type <c>file</c>.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
+ </item>
+ <tag><marker id="modes"/><c>modes = [</c><seealso marker="file#type-mode"><c>file:mode()</c></seealso><c>]</c></tag>
+ <item>
+ <p>This specifies the file modes to use when opening the log
+ file,
+ see <seealso marker="file#open-2"><c>file:open/2</c></seealso>.
+ If <c>modes</c> are not specified, the default list used
+ is <c>[raw,append,delayed_write]</c>. If <c>modes</c> are
+ specified, the list replaces the default modes list with the
+ following adjustments:</p>
+ <list>
+ <item>
+ If <c>raw</c> is not found in the list, it is added.
+ </item>
+ <item>
+ If none of <c>write</c>, <c>append</c> or <c>exclusive</c> is
+ found in the list, <c>append</c> is added.</item>
+ <item>If none of <c>delayed_write</c>
+ or <c>{delayed_write,Size,Delay}</c> is found in the
+ list, <c>delayed_write</c> is added.</item>
+ </list>
<p>Log files are always UTF-8 encoded. The encoding can not be
- changed by setting the option <c>{encoding,Encoding}</c>
- in <c>LogFileOpts</c>.</p>
- <p>Notice that the standard handler does not have support for
- circular logging. Use the disk_log handler,
- <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
- for this.</p>
+ changed by setting the mode <c>{encoding,Encoding}</c>.</p>
<p>The value is set when the handler is added, and it can not
be changed in runtime.</p>
- <p>Defaults to <c>standard_io</c>.</p>
+ <p>Defaults to <c>[raw,append,delayed_write]</c>.</p>
+ </item>
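As a small illustration (the handler name and file name below are arbitrary, not from this patch), passing only the write mode truncates the old log when the node restarts, while raw and delayed_write are added back by the handler:

logger:add_handler(my_truncating_h, logger_std_h,
                   #{config => #{file => "./info.log",
                                 modes => [write]}}).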
+ <tag><marker id="max_no_bytes"/><c>max_no_bytes = pos_integer() | infinity</c></tag>
+ <item>
+      <p>This parameter specifies whether the log file is rotated. The
+        value <c>infinity</c> means the log file grows indefinitely,
+        while an integer value specifies the file size (in bytes) at
+        which the file is rotated.</p>
+ <p>Defaults to <c>infinity</c>.</p>
+ </item>
+ <tag><marker id="max_no_files"/><c>max_no_files = non_neg_integer()</c></tag>
+ <item>
+ <p>This parameter specifies the number of rotated log file
+ archives to keep. This has meaning only
+ if <seealso marker="#max_no_bytes"><c>max_no_bytes</c></seealso>
+ is set to an integer value.</p>
+ <p>The log archives are
+ named <c>FileName.0</c>, <c>FileName.1</c>,
+ ... <c>FileName.N</c>, where <c>FileName</c> is the name of
+ the current log file. <c>FileName.0</c> is the newest of the
+ archives. The maximum value for <c>N</c> is the value
+ of <c>max_no_files</c> minus 1.</p>
+      <p>Notice that setting this value to <c>0</c> does not turn off
+        rotation. It only specifies that no archives are kept.</p>
+ <p>Defaults to <c>0</c>.</p>
+ </item>
+ <tag><marker id="compress_on_rotate"/><c>compress_on_rotate = boolean()</c></tag>
+ <item>
+      <p>This parameter specifies whether the rotated log file archives
+        are compressed. If set to <c>true</c>, all archives are
+        compressed with <c>gzip</c> and renamed
+        to <c>FileName.N.gz</c>.</p>
+ <p><c>compress_on_rotate</c> has no meaning if <seealso
+ marker="#max_no_bytes"><c>max_no_bytes</c></seealso> has the
+ value <c>infinity</c>.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
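As an illustrative sketch (handler name, file name, and limits are arbitrary), a handler that rotates the log at roughly 10 MB and keeps five compressed archives could be added like this:

logger:add_handler(my_rotating_h, logger_std_h,
                   #{config => #{file => "./rotating.log",
                                 max_no_bytes => 10000000,
                                 max_no_files => 5,
                                 compress_on_rotate => true}}).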
+ <tag><marker id="file_check"/><c>file_check = non_neg_integer()</c></tag>
+ <item>
+ <p>When <c>logger_std_h</c> logs to a file, it reads the file
+ information of the log file prior to each write
+ operation. This is to make sure the file still exists and
+ has the same inode as when it was opened. This implies some
+ performance loss, but ensures that no log events are lost in
+ the case when the file has been removed or renamed by an
+ external actor.</p>
+      <p>To reduce this performance loss, the
+ <c>file_check</c> parameter can be set to a positive integer
+ value, <c>N</c>. The handler will then skip reading the file
+ information prior to writing, as long as no more
+ than <c>N</c> milliseconds have passed since it was last
+ read.</p>
+      <p>Notice that the risk of losing log events grows as
+        the <c>file_check</c> value grows.</p>
+ <p>Defaults to 0.</p>
</item>
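For example (illustrative values only), a handler that allows the file information to be up to one second old before it is re-read:

logger:add_handler(my_file_h, logger_std_h,
                   #{config => #{file => "./my_file.log",
                                 file_check => 1000}}).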
- <tag><c>filesync_repeat_interval</c></tag>
+ <tag><c>filesync_repeat_interval = pos_integer() | no_repeat</c></tag>
<item>
<p>This value, in milliseconds, specifies how often the handler does
a file sync operation to write buffered data to disk. The handler attempts
@@ -97,12 +172,13 @@
standard handler and the disk_log handler, and are documented in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
</seealso>.</p>
- <p>Notice that if changing the configuration of the handler in runtime,
- the <c>type</c> parameter must not be modified.</p>
+ <p>Notice that if changing the configuration of the handler in
+ runtime, the <c>type</c>, <c>file</c>, or <c>modes</c> parameters
+ must not be modified.</p>
<p>Example of adding a standard handler:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{config => #{type => {file,"./system_info.log"},
+ #{config => #{file => "./system_info.log",
filesync_repeat_interval => 1000}}).
</code>
<p>To set the default handler, that starts initially with
@@ -110,7 +186,7 @@ logger:add_handler(my_standard_h, logger_std_h,
change the Kernel default logger configuration. Example:</p>
<code type="none">
erl -kernel logger '[{handler,default,logger_std_h,
- #{config => #{type => {file,"./log.log"}}}}]'
+ #{config => #{file => "./log.log"}}}]'
</code>
<p>An example of how to replace the standard handler with a disk_log handler
at startup is found in the
diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml
index 021ecfa40d..1f4d9be1f5 100644
--- a/lib/kernel/doc/src/notes.xml
+++ b/lib/kernel/doc/src/notes.xml
@@ -31,6 +31,25 @@
</header>
<p>This document describes the changes made to the Kernel application.</p>
+<section><title>Kernel 6.2.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+	    When setting the <c>recbuf</c> size of an inet socket, the
+	    <c>buffer</c> size is also automatically increased. This fixes
+	    a bug where the automatic adjustment of the inet buffer size
+	    would be triggered even if an explicit inet buffer size had
+	    already been set.</p>
+ <p>
+ Own Id: OTP-15651 Aux Id: ERIERL-304 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Kernel 6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index 4b48f6cd1d..8fe6bdd1ca 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -147,6 +147,6 @@
{logger_sasl_compatible, false}
]},
{mod, {kernel, []}},
- {runtime_dependencies, ["erts-10.1", "stdlib-3.5", "sasl-3.0"]}
+ {runtime_dependencies, ["erts-10.2.5", "stdlib-3.5", "sasl-3.0"]}
]
}.
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
index ccf0a82ced..66fbcbf78d 100644
--- a/lib/kernel/src/kernel.appup.src
+++ b/lib/kernel/src/kernel.appup.src
@@ -40,7 +40,9 @@
{<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
{<<"^6\\.1$">>,[restart_new_emulator]},
{<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
- {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]}],
+ {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.2$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}],
[{<<"^5\\.3$">>,[restart_new_emulator]},
{<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
{<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
@@ -54,4 +56,6 @@
{<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
{<<"^6\\.1$">>,[restart_new_emulator]},
{<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
- {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]}]}.
+ {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.2$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}]}.
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
index ded89bac9f..8696adbd72 100644
--- a/lib/kernel/src/logger_formatter.erl
+++ b/lib/kernel/src/logger_formatter.erl
@@ -64,7 +64,7 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
Config;
Size0 ->
Size =
- case Size0 - string:length([B,A]) of
+ case Size0 - io_lib:chars_length([B,A]) of
S when S>=0 -> S;
_ -> 0
end,
@@ -75,7 +75,11 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
true ->
%% Trim leading and trailing whitespaces, and replace
%% newlines with ", "
- re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ",
+ T = lists:reverse(
+ trim(
+ lists:reverse(
+ trim(MsgStr0,false)),true)),
+ re:replace(T,",?\r?\n\s*",", ",
[{return,list},global,unicode]);
_false ->
MsgStr0
@@ -83,7 +87,26 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
true ->
""
end,
- truncate([B,MsgStr,A],maps:get(max_size,Config)).
+ truncate(B,MsgStr,A,maps:get(max_size,Config)).
+
+trim([H|T],Rev) when H==$\s; H==$\r; H==$\n ->
+ trim(T,Rev);
+trim([H|T],false) when is_list(H) ->
+ case trim(H,false) of
+ [] ->
+ trim(T,false);
+ TrimmedH ->
+ [TrimmedH|T]
+ end;
+trim([H|T],true) when is_list(H) ->
+ case trim(lists:reverse(H),true) of
+ [] ->
+ trim(T,true);
+ TrimmedH ->
+ [lists:reverse(TrimmedH)|T]
+ end;
+trim(String,_) ->
+ String.
do_format(Level,Data,[level|Format],Config) ->
[to_string(level,Level,Config)|do_format(Level,Data,Format,Config)];
@@ -239,21 +262,47 @@ chardata_to_list(Chardata) ->
throw(Error)
end.
-truncate(String,unlimited) ->
- String;
-truncate(String,Size) ->
- Length = string:length(String),
+truncate(B,Msg,A,unlimited) ->
+ [B,Msg,A];
+truncate(B,Msg,A,Size) ->
+ String = [B,Msg,A],
+ Length = io_lib:chars_length(String),
if Length>Size ->
- case lists:reverse(lists:flatten(String)) of
- [$\n|_] ->
- string:slice(String,0,Size-4)++"...\n";
+ {Last,FlatString} =
+ case A of
+ [] ->
+ case Msg of
+ [] ->
+ {get_last(B),lists:flatten(B)};
+ _ ->
+ {get_last(Msg),lists:flatten([B,Msg])}
+ end;
+ _ ->
+ {get_last(A),lists:flatten(String)}
+ end,
+ case Last of
+ $\n->
+ lists:sublist(FlatString,1,Size-4)++"...\n";
_ ->
- string:slice(String,0,Size-3)++"..."
+ lists:sublist(FlatString,1,Size-3)++"..."
end;
true ->
String
end.
+get_last(L) ->
+ get_first(lists:reverse(L)).
+
+get_first([]) ->
+ error;
+get_first([C|_]) when is_integer(C) ->
+ C;
+get_first([L|Rest]) when is_list(L) ->
+ case get_last(L) of
+ error -> get_first(Rest);
+ First -> First
+ end.
+
%% SysTime is the system time in microseconds
format_time(SysTime,#{time_offset:=Offset,time_designator:=Des})
when is_integer(SysTime) ->
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 4b5e0a7dd0..16946ff97c 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -142,8 +142,9 @@ changing_config(SetOrUpdate,
maps:with(?OLP_KEYS,NewHConfig0)),
case logger_olp:set_opts(Olp,NewOlpOpts) of
ok ->
- maybe_set_repeated_filesync(Olp,OldCommonConfig,
- NewCommonConfig),
+ logger_olp:cast(Olp, {config_changed,
+ NewCommonConfig,
+ NewHandlerConfig}),
ReadOnly = maps:with(?READ_ONLY_KEYS,OldHConfig),
NewHConfig =
maps:merge(
@@ -281,11 +282,24 @@ handle_cast(repeated_filesync,
State#{handler_state => HS, last_op => sync}
end,
{noreply,set_repeated_filesync(State1)};
-
-handle_cast({set_repeated_filesync,FSyncInt},State) ->
- State1 = State#{filesync_repeat_interval=>FSyncInt},
- State2 = set_repeated_filesync(cancel_repeated_filesync(State1)),
- {noreply, State2}.
+handle_cast({config_changed, CommonConfig, HConfig},
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ filesync_repeat_interval := OldFSyncInt}) ->
+ State1 =
+ case maps:get(filesync_repeat_interval,CommonConfig) of
+ OldFSyncInt ->
+ State;
+ FSyncInt ->
+ set_repeated_filesync(
+ cancel_repeated_filesync(
+ State#{filesync_repeat_interval=>FSyncInt}))
+ end,
+ HS = try Module:config_changed(Name, HConfig, HandlerState)
+ catch error:undef -> HandlerState
+ end,
+ {noreply, State1#{handler_state => HS}}.
handle_info(Info, #{id := Name, module := Module,
handler_state := HandlerState} = State) ->
@@ -447,10 +461,3 @@ cancel_repeated_filesync(State) ->
end.
error_notify(Term) ->
?internal_log(error, Term).
-
-maybe_set_repeated_filesync(_Olp,
- #{filesync_repeat_interval:=FSyncInt},
- #{filesync_repeat_interval:=FSyncInt}) ->
- ok;
-maybe_set_repeated_filesync(Olp,_,#{filesync_repeat_interval:=FSyncInt}) ->
- logger_olp:cast(Olp,{set_repeated_filesync,FSyncInt}).
diff --git a/lib/kernel/src/logger_olp.erl b/lib/kernel/src/logger_olp.erl
index 009280a9c9..8365383fe2 100644
--- a/lib/kernel/src/logger_olp.erl
+++ b/lib/kernel/src/logger_olp.erl
@@ -515,10 +515,11 @@ check_load(State = #{id:=_Name, mode_ref := ModeRef, mode := Mode,
end,
State1 = ?update_other(drops,DROPS,_NewDrops,State),
State2 = ?update_max_qlen(QLen,State1),
- State3 = maybe_notify_mode_change(Mode1,State2),
+ State3 = ?update_max_mem(Mem,State2),
+ State4 = maybe_notify_mode_change(Mode1,State3),
{Mode1, QLen, Mem,
?update_other(flushes,FLUSHES,_NewFlushes,
- State3#{last_qlen => QLen})}.
+ State4#{last_qlen => QLen})}.
limit_burst(#{burst_limit_enable := false}=State) ->
{true,State};
diff --git a/lib/kernel/src/logger_olp.hrl b/lib/kernel/src/logger_olp.hrl
index 9b4f5ebf27..d68b5c048d 100644
--- a/lib/kernel/src/logger_olp.hrl
+++ b/lib/kernel/src/logger_olp.hrl
@@ -114,12 +114,16 @@
flushes => 0, flushed => 0, drops => 0,
burst_drops => 0, casts => 0, calls => 0,
writes => 0, max_qlen => 0, max_time => 0,
- freq => {TIME,0,0}} end).
+ max_mem => 0, freq => {TIME,0,0}} end).
-define(update_max_qlen(QLEN, STATE),
begin #{max_qlen := QLEN0} = STATE,
STATE#{max_qlen => ?max(QLEN0,QLEN)} end).
+ -define(update_max_mem(MEM, STATE),
+ begin #{max_mem := MEM0} = STATE,
+ STATE#{max_mem => ?max(MEM0,MEM)} end).
+
-define(update_calls_or_casts(CALL_OR_CAST, INC, STATE),
case CALL_OR_CAST of
cast ->
@@ -154,6 +158,7 @@
-else. % DEFAULT!
-define(merge_with_stats(STATE), STATE).
-define(update_max_qlen(_QLEN, STATE), STATE).
+ -define(update_max_mem(_MEM, STATE), STATE).
-define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE).
-define(update_max_time(_TIME, STATE), STATE).
-define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE).
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 65f5b3876e..c8f1acfca4 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -29,7 +29,7 @@
-export([filesync/1]).
%% logger_h_common callbacks
--export([init/2, check_config/4, reset_state/2,
+-export([init/2, check_config/4, config_changed/3, reset_state/2,
filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
@@ -105,85 +105,169 @@ filter_config(Config) ->
%%%===================================================================
%%% logger_h_common callbacks
%%%===================================================================
-init(Name, #{type := Type}) ->
- case open_log_file(Name, Type) of
+init(Name, Config) ->
+ MyConfig = maps:with([type,file,modes,file_check,max_no_bytes,
+ max_no_files,compress_on_rotate],Config),
+ case file_ctrl_start(Name, MyConfig) of
{ok,FileCtrlPid} ->
- {ok,#{type=>Type,file_ctrl_pid=>FileCtrlPid}};
+ {ok,MyConfig#{file_ctrl_pid=>FileCtrlPid}};
Error ->
Error
end.
-check_config(_Name,set,undefined,NewHConfig) ->
- check_config(maps:merge(get_default_config(),NewHConfig));
-check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
- WriteOnce = maps:with([type],OldHConfig),
+check_config(Name,set,undefined,NewHConfig) ->
+ check_h_config(merge_default_config(Name,normalize_config(NewHConfig)));
+check_config(Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
+ WriteOnce = maps:with([type,file,modes],OldHConfig),
Default =
case SetOrUpdate of
set ->
%% Do not reset write-once fields to defaults
- maps:merge(get_default_config(),WriteOnce);
+ merge_default_config(Name,WriteOnce);
update ->
OldHConfig
end,
- NewHConfig = maps:merge(Default, NewHConfig0),
+ NewHConfig = maps:merge(Default, normalize_config(NewHConfig0)),
%% Fail if write-once fields are changed
- case maps:with([type],NewHConfig) of
+ case maps:with([type,file,modes],NewHConfig) of
WriteOnce ->
- check_config(NewHConfig);
+ check_h_config(NewHConfig);
Other ->
{error,{illegal_config_change,?MODULE,WriteOnce,Other}}
end.
-check_config(#{type:=Type}=HConfig) ->
- case check_h_config(maps:to_list(HConfig)) of
- ok when is_atom(Type) ->
- {ok,HConfig#{filesync_repeat_interval=>no_repeat}};
+check_h_config(HConfig) ->
+ case check_h_config(maps:get(type,HConfig),maps:to_list(HConfig)) of
ok ->
- {ok,HConfig};
+ {ok,fix_file_opts(HConfig)};
{error,{Key,Value}} ->
{error,{invalid_config,?MODULE,#{Key=>Value}}}
end.
-check_h_config([{type,Type} | Config]) when Type == standard_io;
- Type == standard_error ->
- check_h_config(Config);
-check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
- check_h_config(Config);
-check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
- is_list(Modes) ->
- check_h_config(Config);
-check_h_config([Other | _]) ->
+check_h_config(Type,[{type,Type} | Config]) when Type =:= standard_io;
+ Type =:= standard_error;
+ Type =:= file ->
+ check_h_config(Type,Config);
+check_h_config(file,[{file,File} | Config]) when is_list(File) ->
+ check_h_config(file,Config);
+check_h_config(file,[{modes,Modes} | Config]) when is_list(Modes) ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_bytes,Size} | Config])
+ when (is_integer(Size) andalso Size>0) orelse Size=:=infinity ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_files,Num} | Config]) when is_integer(Num), Num>=0 ->
+ check_h_config(file,Config);
+check_h_config(file,[{compress_on_rotate,Bool} | Config]) when is_boolean(Bool) ->
+ check_h_config(file,Config);
+check_h_config(file,[{file_check,FileCheck} | Config])
+ when is_integer(FileCheck), FileCheck>=0 ->
+ check_h_config(file,Config);
+check_h_config(_Type,[Other | _]) ->
{error,Other};
-check_h_config([]) ->
+check_h_config(_Type,[]) ->
ok.
-get_default_config() ->
- #{type => standard_io}.
+normalize_config(#{type:={file,File}}=HConfig) ->
+ HConfig#{type=>file,file=>File};
+normalize_config(#{type:={file,File,Modes}}=HConfig) ->
+ HConfig#{type=>file,file=>File,modes=>Modes};
+normalize_config(HConfig) ->
+ HConfig.
+
+merge_default_config(Name,#{type:=Type}=HConfig) ->
+ merge_default_config(Name,Type,HConfig);
+merge_default_config(Name,#{file:=_}=HConfig) ->
+ merge_default_config(Name,file,HConfig);
+merge_default_config(Name,HConfig) ->
+ merge_default_config(Name,standard_io,HConfig).
+
+merge_default_config(Name,Type,HConfig) ->
+ maps:merge(get_default_config(Name,Type),HConfig).
+
+get_default_config(Name,file) ->
+ #{type => file,
+ file => atom_to_list(Name),
+ modes => [raw,append],
+ file_check => 0,
+ max_no_bytes => infinity,
+ max_no_files => 0,
+ compress_on_rotate => false};
+get_default_config(_Name,Type) ->
+ #{type => Type}.
+
+fix_file_opts(#{modes:=Modes}=HConfig) ->
+ HConfig#{modes=>fix_modes(Modes)};
+fix_file_opts(HConfig) ->
+ HConfig#{filesync_repeat_interval=>no_repeat}.
+
+fix_modes(Modes) ->
+ %% Ensure write|append|exclusive
+ Modes1 =
+ case [M || M <- Modes,
+ lists:member(M,[write,append,exclusive])] of
+ [] -> [append|Modes];
+ _ -> Modes
+ end,
+ %% Ensure raw
+ Modes2 =
+ case lists:member(raw,Modes) of
+ false -> [raw|Modes1];
+ true -> Modes1
+ end,
+ %% Ensure delayed_write
+ case lists:partition(fun(delayed_write) -> true;
+ ({delayed_write,_,_}) -> true;
+ (_) -> false
+ end, Modes2) of
+ {[],_} ->
+ [delayed_write|Modes2];
+ _ ->
+ Modes2
+ end.
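For reference, a short sketch of what the mode normalization above yields (fix_modes/1 is an internal, unexported helper, so these calls are purely illustrative; the element order follows from the three clauses and matches the ModOpts pattern used in the file_opts test case further down):

    %% Hypothetical traces of fix_modes/1, following the three steps above:
    [delayed_write,append,raw] = fix_modes([raw]),
    [delayed_write,raw,append] = fix_modes([raw,append]),
    [raw,write,delayed_write]  = fix_modes([raw,write,delayed_write]).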
-filesync(_Name, _Mode, #{type := Type}=State) when is_atom(Type) ->
- {ok,State};
-filesync(_Name, async, #{file_ctrl_pid := FileCtrlPid} = State) ->
- ok = file_ctrl_filesync_async(FileCtrlPid),
- {ok,State};
-filesync(_Name, sync, #{file_ctrl_pid := FileCtrlPid} = State) ->
- Result = file_ctrl_filesync_sync(FileCtrlPid),
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}=State) ->
+ State;
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_ctrl_pid := FileCtrlPid} = State) ->
+ FileCtrlPid ! {update_config,#{file_check=>FileCheck,
+ max_no_bytes=>Size,
+ max_no_files=>Count,
+ compress_on_rotate=>Compress}},
+ State#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress};
+config_changed(_Name,_NewHConfig,State) ->
+ State.
+
+filesync(_Name, SyncAsync, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ Result = file_ctrl_filesync(SyncAsync, FileCtrlPid),
{Result,State}.
-write(_Name, async, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
- ok = file_write_async(FileCtrlPid, Bin),
- {ok,State};
-write(_Name, sync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
- Result = file_write_sync(FileCtrlPid, Bin),
+write(_Name, SyncAsync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ Result = file_write(SyncAsync, FileCtrlPid, Bin),
{Result,State}.
reset_state(_Name, State) ->
State.
-handle_info(_Name, {'EXIT',Pid,Why}, #{type := FileInfo, file_ctrl_pid := Pid}) ->
+handle_info(_Name, {'EXIT',Pid,Why}, #{file_ctrl_pid := Pid}=State) ->
%% file_ctrl_pid died, file error, terminate handler
- exit({error,{write_failed,FileInfo,Why}});
+ exit({error,{write_failed,maps:with([type,file,modes],State),Why}});
handle_info(_, _, State) ->
State.
@@ -211,27 +295,33 @@ terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
%%%-----------------------------------------------------------------
%%%
-open_log_file(HandlerName, FileInfo) ->
- case file_ctrl_start(HandlerName, FileInfo) of
- OK = {ok,_FileCtrlPid} -> OK;
- Error -> Error
- end.
-
-do_open_log_file({file,FileName}) ->
- do_open_log_file({file,FileName,[raw,append,delayed_write]});
-
-do_open_log_file({file,FileName,[]}) ->
- do_open_log_file({file,FileName,[raw,append,delayed_write]});
-
-do_open_log_file({file,FileName,Modes}) ->
+open_log_file(HandlerName,#{type:=file,
+ file:=FileName,
+ modes:=Modes,
+ file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}) ->
try
case filelib:ensure_dir(FileName) of
ok ->
case file:open(FileName, Modes) of
{ok, Fd} ->
{ok,#file_info{inode=INode}} =
- file:read_file_info(FileName),
- {ok, {Fd, INode}};
+ file:read_file_info(FileName,[raw]),
+ UpdateModes = [append | Modes--[write,append,exclusive]],
+ State0 = #{handler_name=>HandlerName,
+ file_name=>FileName,
+ modes=>UpdateModes,
+ file_check=>FileCheck,
+ fd=>Fd,
+ inode=>INode,
+ last_check=>timestamp(),
+ synced=>false,
+ write_res=>ok,
+ sync_res=>ok},
+ State = update_rotation({Size,Count,Compress},State0),
+ {ok,State};
Error ->
Error
end;
@@ -242,21 +332,23 @@ do_open_log_file({file,FileName,Modes}) ->
_:Reason -> {error,Reason}
end.
-close_log_file(Std) when Std == standard_io; Std == standard_error ->
- ok;
-close_log_file({Fd,_}) ->
+close_log_file(#{fd:=Fd}) ->
_ = file:datasync(Fd),
- _ = file:close(Fd).
+ _ = file:close(Fd),
+ ok;
+close_log_file(_) ->
+ ok.
+
%%%-----------------------------------------------------------------
%%% File control process
-file_ctrl_start(HandlerName, FileInfo) ->
+file_ctrl_start(HandlerName, HConfig) ->
Starter = self(),
FileCtrlPid =
spawn_link(fun() ->
- file_ctrl_init(HandlerName, FileInfo, Starter)
+ file_ctrl_init(HandlerName, HConfig, Starter)
end),
receive
{FileCtrlPid,ok} ->
@@ -271,18 +363,16 @@ file_ctrl_start(HandlerName, FileInfo) ->
file_ctrl_stop(Pid) ->
Pid ! stop.
-file_write_async(Pid, Bin) ->
+file_write(async, Pid, Bin) ->
Pid ! {log,Bin},
- ok.
-
-file_write_sync(Pid, Bin) ->
+ ok;
+file_write(sync, Pid, Bin) ->
file_ctrl_call(Pid, {log,Bin}).
-file_ctrl_filesync_async(Pid) ->
+file_ctrl_filesync(async, Pid) ->
Pid ! filesync,
- ok.
-
-file_ctrl_filesync_sync(Pid) ->
+ ok;
+file_ctrl_filesync(sync, Pid) ->
file_ctrl_call(Pid, filesync).
file_ctrl_call(Pid, Msg) ->
@@ -299,98 +389,255 @@ file_ctrl_call(Pid, Msg) ->
{error,{no_response,Pid}}
end.
-file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
+file_ctrl_init(HandlerName,
+ #{type:=file,
+ file:=FileName} = HConfig,
+ Starter) ->
process_flag(message_queue_data, off_heap),
- FileName = element(2, FileInfo),
- case do_open_log_file(FileInfo) of
- {ok,File} ->
+ case open_log_file(HandlerName,HConfig) of
+ {ok,State} ->
Starter ! {self(),ok},
- file_ctrl_loop(File, FileName, false, ok, ok, HandlerName);
+ file_ctrl_loop(State);
{error,Reason} ->
Starter ! {self(),{error,{open_failed,FileName,Reason}}}
end;
-file_ctrl_init(HandlerName, StdDev, Starter) ->
+file_ctrl_init(HandlerName, #{type:=StdDev}, Starter) ->
Starter ! {self(),ok},
- file_ctrl_loop(StdDev, StdDev, false, ok, ok, HandlerName).
+ file_ctrl_loop(#{handler_name=>HandlerName,dev=>StdDev}).
-file_ctrl_loop(File, DevName, Synced,
- PrevWriteResult, PrevSyncResult, HandlerName) ->
+file_ctrl_loop(State) ->
receive
%% asynchronous event
{log,Bin} ->
- File1 = ensure(File, DevName),
- Result = write_to_dev(File1, Bin, DevName,
- PrevWriteResult, HandlerName),
- file_ctrl_loop(File1, DevName, false,
- Result, PrevSyncResult, HandlerName);
+ State1 = write_to_dev(Bin,State),
+ file_ctrl_loop(State1);
%% synchronous event
{{log,Bin},{From,MRef}} ->
- File1 = ensure(File, DevName),
- Result = write_to_dev(File1, Bin, DevName,
- PrevWriteResult, HandlerName),
+ State1 = ensure_file(State),
+ State2 = write_to_dev(Bin,State1),
From ! {MRef,ok},
- file_ctrl_loop(File1, DevName, false,
- Result, PrevSyncResult, HandlerName);
+ file_ctrl_loop(State2);
filesync ->
- File1 = ensure(File, DevName),
- Result = sync_dev(File1, DevName, Synced,
- PrevSyncResult, HandlerName),
- file_ctrl_loop(File1, DevName, true,
- PrevWriteResult, Result, HandlerName);
+ State1 = sync_dev(State),
+ file_ctrl_loop(State1);
{filesync,{From,MRef}} ->
- File1 = ensure(File, DevName),
- Result = sync_dev(File1, DevName, Synced,
- PrevSyncResult, HandlerName),
+ State1 = ensure_file(State),
+ State2 = sync_dev(State1),
From ! {MRef,ok},
- file_ctrl_loop(File1, DevName, true,
- PrevWriteResult, Result, HandlerName);
+ file_ctrl_loop(State2);
+
+ {update_config,#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}} ->
+ State1 = update_rotation({Size,Count,Compress},State),
+ file_ctrl_loop(State1#{file_check=>FileCheck});
stop ->
- _ = close_log_file(File),
+ close_log_file(State),
stopped
end.
+maybe_ensure_file(#{file_check:=0}=State) ->
+ ensure_file(State);
+maybe_ensure_file(#{last_check:=T0,file_check:=CheckInt}=State)
+ when is_integer(CheckInt) ->
+ T = timestamp(),
+ if T-T0 > CheckInt -> ensure_file(State);
+ true -> State
+ end;
+maybe_ensure_file(State) ->
+ State.
+
%% In order to play well with tools like logrotate, we need to be able
%% to re-create the file if it has disappeared (e.g. if rotated by
%% logrotate)
-ensure(Fd,DevName) when is_atom(DevName) ->
- Fd;
-ensure({Fd,INode},FileName) ->
- case file:read_file_info(FileName) of
- {ok,#file_info{inode=INode}} ->
- {Fd,INode};
+ensure_file(#{inode:=INode0,file_name:=FileName,modes:=Modes}=State) ->
+ case file:read_file_info(FileName,[raw]) of
+ {ok,#file_info{inode=INode0}} ->
+ State#{last_check=>timestamp()};
_ ->
- _ = file:close(Fd),
- _ = file:close(Fd), % delayed_write cause close not to close
- case do_open_log_file({file,FileName}) of
- {ok,File} ->
- File;
+            close_log_file(State),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} =
+ file:read_file_info(FileName,[raw]),
+ State#{fd=>Fd,inode=>INode,
+ last_check=>timestamp(),
+ synced=>true,sync_res=>ok};
Error ->
exit({could_not_reopen_file,Error})
end
- end.
+ end;
+ensure_file(State) ->
+ State.
-write_to_dev(DevName, Bin, _DevName, _PrevWriteResult, _HandlerName)
- when is_atom(DevName) ->
- io:put_chars(DevName, Bin);
-write_to_dev({Fd,_}, Bin, FileName, PrevWriteResult, HandlerName) ->
+write_to_dev(Bin,#{dev:=DevName}=State) ->
+ io:put_chars(DevName, Bin),
+ State;
+write_to_dev(Bin, State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
Result = ?file_write(Fd, Bin),
- maybe_notify_error(write,Result,PrevWriteResult,FileName,HandlerName).
+ State2 = maybe_rotate_file(Bin,State1),
+ maybe_notify_error(write,Result,State2),
+ State2#{synced=>false,write_res=>Result}.
-sync_dev(_, _FileName, true, PrevSyncResult, _HandlerName) ->
- PrevSyncResult;
-sync_dev({Fd,_}, FileName, false, PrevSyncResult, HandlerName) ->
+sync_dev(#{synced:=false}=State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
Result = ?file_datasync(Fd),
- maybe_notify_error(filesync,Result,PrevSyncResult,FileName,HandlerName).
+ maybe_notify_error(filesync,Result,State1),
+ State1#{synced=>true,sync_res=>Result};
+sync_dev(State) ->
+ State.
-maybe_notify_error(_Op, ok, _PrevResult, _FileName, _HandlerName) ->
+update_rotation({infinity,_,_},State) ->
+ maybe_remove_archives(0,State),
+ maps:remove(rotation,State);
+update_rotation({Size,Count,Compress},#{file_name:=FileName} = State) ->
+ maybe_remove_archives(Count,State),
+ {ok,#file_info{size=CurrSize}} = file:read_file_info(FileName,[raw]),
+ State1 = State#{rotation=>#{size=>Size,
+ count=>Count,
+ compress=>Compress,
+ curr_size=>CurrSize}},
+ maybe_update_compress(0,State1),
+ maybe_rotate_file(0,State1).
+
+maybe_remove_archives(Count,#{file_name:=FileName}=State) ->
+ Archive = rot_file_name(FileName,Count,false),
+ CompressedArchive = rot_file_name(FileName,Count,true),
+ case {file:read_file_info(Archive,[raw]),
+ file:read_file_info(CompressedArchive,[raw])} of
+ {{error,enoent},{error,enoent}} ->
+ ok;
+ _ ->
+ _ = file:delete(Archive),
+ _ = file:delete(CompressedArchive),
+ maybe_remove_archives(Count+1,State)
+ end.
+
+maybe_update_compress(Count,#{rotation:=#{count:=Count}}) ->
+ ok;
+maybe_update_compress(N,#{file_name:=FileName,
+ rotation:=#{compress:=Compress}}=State) ->
+ Archive = rot_file_name(FileName,N,not Compress),
+ case file:read_file_info(Archive,[raw]) of
+ {ok,_} when Compress ->
+ compress_file(Archive);
+ {ok,_} ->
+ decompress_file(Archive);
+ _ ->
+ ok
+ end,
+ maybe_update_compress(N+1,State).
+
+maybe_rotate_file(Bin,#{rotation:=_}=State) when is_binary(Bin) ->
+ maybe_rotate_file(byte_size(Bin),State);
+maybe_rotate_file(AddSize,#{rotation:=#{size:=RotSize,
+ curr_size:=CurrSize}=Rotation}=State) ->
+ NewSize = CurrSize + AddSize,
+ if NewSize>RotSize ->
+ rotate_file(State#{rotation=>Rotation#{curr_size=>NewSize}});
+ true ->
+ State#{rotation=>Rotation#{curr_size=>NewSize}}
+ end;
+maybe_rotate_file(_Bin,State) ->
+ State.
+
+rotate_file(#{fd:=Fd0,file_name:=FileName,modes:=Modes,rotation:=Rotation}=State) ->
+ State1 = sync_dev(State),
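+    %% delayed_write can cause the first close not to close the fd; close twice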
+ _ = file:close(Fd0),
+ _ = file:close(Fd0),
+ rotate_files(FileName,maps:get(count,Rotation),maps:get(compress,Rotation)),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} = file:read_file_info(FileName,[raw]),
+ State1#{fd=>Fd,inode=>INode,rotation=>Rotation#{curr_size=>0}};
+ Error ->
+ exit({could_not_reopen_file,Error})
+ end.
+
+rotate_files(FileName,0,_Compress) ->
+ _ = file:delete(FileName),
+ ok;
+rotate_files(FileName,1,Compress) ->
+ FileName0 = FileName++".0",
+ _ = file:rename(FileName,FileName0),
+ if Compress -> compress_file(FileName0);
+ true -> ok
+ end,
ok;
-maybe_notify_error(_Op, PrevResult, PrevResult, _FileName, _HandlerName) ->
+rotate_files(FileName,Count,Compress) ->
+ _ = file:rename(rot_file_name(FileName,Count-2,Compress),
+ rot_file_name(FileName,Count-1,Compress)),
+ rotate_files(FileName,Count-1,Compress).
+
+rot_file_name(FileName,Count,false) ->
+ FileName ++ "." ++ integer_to_list(Count);
+rot_file_name(FileName,Count,true) ->
+ rot_file_name(FileName,Count,false) ++ ".gz".
+
+compress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(FileName++".gz",[write]),
+ Z = zlib:open(),
+ zlib:deflateInit(Z, default, deflated, 31, 8, default),
+ compress_data(Z,In,Out),
+ zlib:deflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+compress_data(Z,In,Out) ->
+ case file:read(In,100000) of
+ {ok,Data} ->
+ Compressed = zlib:deflate(Z, Data),
+ _ = file:write(Out,Compressed),
+ compress_data(Z,In,Out);
+ eof ->
+ Compressed = zlib:deflate(Z, <<>>, finish),
+ _ = file:write(Out,Compressed),
+ ok
+ end.
+
+decompress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(filename:rootname(FileName,".gz"),[write]),
+ Z = zlib:open(),
+ zlib:inflateInit(Z, 31),
+ decompress_data(Z,In,Out),
+ zlib:inflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+decompress_data(Z,In,Out) ->
+ case file:read(In,1000) of
+ {ok,Data} ->
+ Decompressed = zlib:inflate(Z, Data),
+ _ = file:write(Out,Decompressed),
+ decompress_data(Z,In,Out);
+ eof ->
+ ok
+ end.
+
+maybe_notify_error(_Op, ok, _State) ->
+ ok;
+maybe_notify_error(Op, Result, #{write_res:=WR,sync_res:=SR})
+ when (Op==write andalso Result==WR) orelse
+ (Op==filesync andalso Result==SR) ->
%% don't report same error twice
- PrevResult;
-maybe_notify_error(Op, Error, _PrevResult, FileName, HandlerName) ->
+ ok;
+maybe_notify_error(Op, Error, #{handler_name:=HandlerName,file_name:=FileName}) ->
logger_h_common:error_notify({HandlerName,Op,FileName,Error}),
- Error.
+ ok.
+
+timestamp() ->
+ erlang:monotonic_time(millisecond).
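Taken together, the changes above give logger_std_h built-in file rotation. A minimal usage sketch (the handler id, file name and limits below are made up; the option names are the ones accepted by check_h_config/2 above, sizes are in bytes and file_check is in milliseconds):

    %% Keep at most 5 archives of roughly 10 MB each, gzip them on rotation,
    %% and re-check the log file's inode at most once per second.
    ok = logger:add_handler(my_file_log, logger_std_h,
                            #{config => #{file => "my_app.log",
                                          max_no_bytes => 10485760,
                                          max_no_files => 5,
                                          compress_on_rotate => true,
                                          file_check => 1000}}).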
diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec
index 62afc9f97b..eaa17f3a59 100644
--- a/lib/kernel/test/kernel.spec
+++ b/lib/kernel/test/kernel.spec
@@ -2,3 +2,4 @@
{config, "../test_server/ts.unix.config"}.
{suites,"../kernel_test", all}.
+{skip_suites,"../kernel_test",[logger_stress_SUITE],"Benchmarks only"}.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index 70bb775db8..035e5d8974 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -1048,8 +1048,11 @@ kernel_config(Config) ->
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+ [append,delayed_write,raw] = lists:sort(Modes),
+
%% Same, but using 'logger' parameter instead of 'error_logger'
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
@@ -1060,26 +1063,27 @@ kernel_config(Config) ->
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
%% Same, but with type={file,File,Modes}
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
- M = [raw,write,delayed_write],
+ M = [raw,write],
ok = rpc:call(Node,application,set_env,[kernel,logger,
[{handler,default,logger_std_h,
#{config=>#{type=>{file,F,M}}}}]]),
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F,M}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=[delayed_write|M]}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
%% Same, but with disk_log handler
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
- M = [raw,write,delayed_write],
ok = rpc:call(Node,application,set_env,[kernel,logger,
[{handler,default,logger_disk_log_h,
#{config=>#{file=>F}}}]]),
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index b2c2c8ba67..0c5516f82b 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -112,6 +112,8 @@ all() ->
add_remove_instance_standard_error,
add_remove_instance_file1,
add_remove_instance_file2,
+ add_remove_instance_file3,
+ add_remove_instance_file4,
default_formatter,
filter_config,
errors,
@@ -142,7 +144,12 @@ all() ->
restart_after,
handler_requests_under_load,
recreate_deleted_log,
- reopen_changed_log
+ reopen_changed_log,
+ rotate_size,
+ rotate_size_compressed,
+ rotate_size_reopen,
+ rotation_opts,
+ rotation_opts_restart_handler
].
add_remove_instance_tty(_Config) ->
@@ -179,10 +186,27 @@ add_remove_instance_file2(Config) ->
add_remove_instance_file2(cleanup,_Config) ->
logger_std_h_remove().
-add_remove_instance_file(Log, Type) ->
+add_remove_instance_file3(_Config) ->
+ Log = atom_to_list(?MODULE),
+ StdHConfig = #{type=>file},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file3(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file4(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog4.txt"),
+ StdHConfig = #{file=>Log,modes=>[]},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file4(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file(Log, Type) when not is_map(Type) ->
+ add_remove_instance_file(Log,#{type=>Type});
+add_remove_instance_file(Log, StdHConfig) when is_map(StdHConfig) ->
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => StdHConfig,
filter_default=>stop,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
@@ -257,9 +281,10 @@ errors(Config) ->
end,
{error,
- {handler_not_added,{open_failed,Log,_}}} =
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{modes:=bad_file_opt}}}} =
logger:add_handler(myh3,logger_std_h,
- #{config=>#{type=>{file,Log,[bad_file_opt]}}}),
+ #{config=>#{type=>{file,Log,bad_file_opt}}}),
ok = logger:notice(?msg).
@@ -607,24 +632,51 @@ reconfig(cleanup, _Config) ->
file_opts(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
- BadFileOpts = [raw],
- BadType = {file,Log,BadFileOpts},
- {error,{handler_not_added,{open_failed,Log,enoent}}} =
- logger:add_handler(?MODULE, logger_std_h,
- #{config => #{type => BadType}}),
+ MissingOpts = [raw],
+ Type1 = {file,Log,MissingOpts},
+ ok = logger:add_handler(?MODULE, logger_std_h,
+ #{config => #{type => Type1}}),
+ {ok,#{config:=#{type:=file,file:=Log,modes:=Modes1}}} =
+ logger:get_handler_config(?MODULE),
+ [append,delayed_write,raw] = lists:sort(Modes1),
+ ok = logger:remove_handler(?MODULE),
OkFileOpts = [raw,append],
OkType = {file,Log,OkFileOpts},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => OkType},
+ #{config => #{type => OkType}, % old format
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{cb_state := #{handler_state := #{type := OkType}}} =
+ ModOpts = [delayed_write|OkFileOpts],
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
logger_olp:info(h_proc_name()),
- {ok,#{config := #{type := OkType}}} = logger:get_handler_config(?MODULE),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => file,
+ file => Log,
+ modes => OkFileOpts}, % new format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger:get_handler_config(?MODULE),
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
@@ -640,13 +692,14 @@ sync(Config) ->
Type = {file,Log},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => #{type => Type,
+ file_check => 10000},
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
%% check repeated filesync happens
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"first\n">>},
{file,datasync}]),
@@ -656,7 +709,7 @@ sync(Config) ->
check_tracer(filesync_rep_int()*2),
%% check that explicit filesync is only done once
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"second\n">>},
{file,datasync},
@@ -675,7 +728,7 @@ sync(Config) ->
#{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval,
maps:get(cb_state, logger_olp:info(h_proc_name()))),
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"third\n">>},
{file,datasync},
@@ -1285,6 +1338,331 @@ reopen_changed_log(Config) ->
reopen_changed_log(cleanup, _Config) ->
ok = stop_handler(?MODULE).
+rotate_size(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1005}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ ok.
+rotate_size(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_compressed(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ #{config=>#{max_no_bytes=>1000,
+ max_no_files=>2,
+ compress_on_rotate=>true}}),
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".0.gz"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".1.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=38}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ ok.
+rotate_size_compressed(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_reopen(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ {ok,HConfig} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(?MODULE,maps:get(module,HConfig),HConfig),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=580}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ ok.
+rotate_size_reopen(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts(Config) ->
+ {Log,_HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ #{max_no_bytes:=infinity,
+ max_no_files:=0,
+ compress_on_rotate:=false} = StdHConfig,
+
+ %% Test bad rotation config
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_bytes=>0}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_files=>infinity}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{compress_on_rotate=>undefined}),
+
+
+ %% Test good rotation config - start with no rotation
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=200}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Turn on rotation, check that existing file is rotated since its
+ %% size exceeds max_no_bytes
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0 = Log++".0",
+ {ok,#file_info{size=200}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count and check that nothing changes with existing files
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>200,
+ max_no_files=>3}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Add more log events and see that extended size and count works
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".2"),
+ [_,_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and check that archive files that exceed the new
+    %% count are removed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count again, and turn on compression. Check
+ %% that archives are compressed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2,
+ compress_on_rotate=>true}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0gz = Log0++".gz",
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ [Log0gz] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".1.gz"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and turn off compression. Check that archives that
+    %% exceed the new count are removed, and the rest are
+    %% decompressed.
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1,
+ compress_on_rotate=>false}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Check that config and handler state agree on the current rotation settings
+ {ok,#{config:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger:get_handler_config(?MODULE),
+ #{cb_state:=#{handler_state:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger_olp:info(h_proc_name()),
+ ok.
+rotation_opts(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts_restart_handler(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+
+ %% Fill all logs
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,15)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn off rotation. Check that archives are removed.
+ {ok,#{config:=StdHConfig1}=HConfig1} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig1#{config=>StdHConfig1#{max_no_bytes=>infinity}}),
+ timer:sleep(100),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Add some log events and check that file is no longer rotated.
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=260}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn on rotation. Check that file is rotated.
+ {ok,#{config:=StdHConfig2}=HConfig2} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig2#{config=>StdHConfig2#{max_no_bytes=>100,
+ max_no_files=>2}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=80}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".1"),
+
+ %% Stop/start handler, reduce count and turn on compression. Check
+ %% that excess archives are removed, and the rest compressed.
+ {ok,#{config:=StdHConfig3}=HConfig3} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig3#{config=>StdHConfig3#{max_no_bytes=>75,
+ max_no_files=>1,
+ compress_on_rotate=>true}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".0.gz"),
+ [_] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn off compression. Check that archives
+ %% are decompressed.
+ {ok,#{config:=StdHConfig4}=HConfig4} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig4#{config=>StdHConfig4#{compress_on_rotate=>false}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=80}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ ok.
+rotation_opts_restart_handler(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
%%%-----------------------------------------------------------------
%%%
send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
@@ -1305,8 +1683,8 @@ start_handler(Name, TTY, _Config) when TTY == standard_io;
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => TTY},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{HConfig,StdHConfig};
@@ -1320,12 +1698,17 @@ start_handler(Name, FuncName, Config) ->
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => Type},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{Log,HConfig,StdHConfig}.
+
+filter_only_this_domain(Name) ->
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,[Name]}}}].
+
stop_handler(Name) ->
R = logger:remove_handler(Name),
ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
@@ -1658,7 +2041,7 @@ tpl([]) ->
tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
-tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[Data,_]}},
{Pid,[{Mod,Func,Data}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
@@ -1742,4 +2125,3 @@ filesync_rep_int() ->
file_delete(Log) ->
file:delete(Log).
-
diff --git a/lib/kernel/test/logger_stress_SUITE.erl b/lib/kernel/test/logger_stress_SUITE.erl
index 4072e8c86a..1a278fb1b2 100644
--- a/lib/kernel/test/logger_stress_SUITE.erl
+++ b/lib/kernel/test/logger_stress_SUITE.erl
@@ -67,6 +67,10 @@ all() ->
reject_events,
std_handler,
disk_log_handler,
+ std_handler_time,
+ std_handler_time_big,
+ disk_log_handler_time,
+ disk_log_handler_time_big,
emulator_events,
remote_events,
remote_to_disk_log,
@@ -123,6 +127,20 @@ std_handler(cleanup,_Config) ->
_ = file:delete("default.log"),
ok.
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+std_handler_time(Config) ->
+ measure_handler_time(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+std_handler_time_big(Config) ->
+ measure_handler_time_big(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
%% Cascading failure that produce gen_server and proc_lib reports -
%% how many of the produced log events are actually written to a log
%% with logger_disk_log_h wrap file handler.
@@ -140,6 +158,20 @@ disk_log_handler(cleanup,_Config) ->
[_ = file:delete(F) || F <- Files],
ok.
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+disk_log_handler_time(Config) ->
+ measure_handler_time(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+disk_log_handler_time_big(Config) ->
+ measure_handler_time_big(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
%% Cascading failure that produce log events from the emulator - how
%% many of the produced log events pass through the proxy.
emulator_events(Config) ->
@@ -221,6 +253,68 @@ remote_emulator_to_disk_log(cleanup,_Config) ->
%%%-----------------------------------------------------------------
%%% Internal functions
+measure_handler_time(Module,HCfg,Config) ->
+ measure_handler_time(Module,100000,small_fa(),millisecond,HCfg,#{},Config).
+
+measure_handler_time_big(Module,HCfg,Config) ->
+ FCfg = #{chars_limit=>4096, max_size=>1024},
+ measure_handler_time(Module,100,big_fa(),second,HCfg,FCfg,Config).
+
+measure_handler_time(Module,N,FA,Unit,HCfg,FCfg,Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,
+ [{handler,default,Module,
+ #{formatter => {logger_formatter,
+ maps:merge(#{legacy_header=>false,
+ single_line=>true},FCfg)},
+ config=>maps:merge(#{sync_mode_qlen => N+1,
+ drop_mode_qlen => N+1,
+ flush_qlen => N+1,
+ burst_limit_enable => false,
+ filesync_repeat_interval => no_repeat},
+ HCfg)}}]}]),
+ %% HPid = rpc:call(Node,erlang,whereis,[?name_to_reg_name(Module,default)]),
+ %% {links,[_,FCPid]} = rpc:call(Node,erlang,process_info,[HPid,links]),
+ T0 = erlang:monotonic_time(millisecond),
+ ok = rpc:call(Node,?MODULE,nlogs_wait,[N,FA,Module]),
+    %% ok = rpc:call(Node,fprof,apply,[fun ?MODULE:nlogs_wait/3,[N div 10,FA,Module],[{procs,[HPid,FCPid]}]]),
+ T1 = erlang:monotonic_time(millisecond),
+ T = T1-T0,
+ M = case Unit of
+ millisecond -> 1;
+ second -> 1000
+ end,
+ IOPS = M*N/T,
+    ct:pal("N: ~p~nT: ~p~nIOPS: ~.2f events per ~w",[N,T,IOPS,Unit]),
+ %% Stats = rpc:call(Node,logger_olp,info,[?name_to_reg_name(Module,default)]),
+ %% ct:pal("Stats: ~p",[Stats]),
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+    {comment,io_lib:format("~.2f events written per ~w",[IOPS,Unit])}.
+
+nlogs_wait(N,{F,A},Module) ->
+ group_leader(whereis(user),self()),
+ [?LOG_NOTICE(F,A) || _ <- lists:seq(1,N)],
+ wait(Module).
+
+wait(Module) ->
+ case Module:filesync(default) of
+ {error,handler_busy} ->
+ wait(Module);
+ ok ->
+ ok
+ end.
+
+small_fa() ->
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ {"~ts",[Str]}.
+
+big_fa() ->
+ {"~p",[lists:duplicate(1048576,"a")]}.
+
nlogs(N) ->
group_leader(whereis(user),self()),
Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk
index 4b43c6ae9d..6e0eea7824 100644
--- a/lib/kernel/vsn.mk
+++ b/lib/kernel/vsn.mk
@@ -1 +1 @@
-KERNEL_VSN = 6.2
+KERNEL_VSN = 6.2.1
diff --git a/lib/ssh/test/ssh_protocol_SUITE.erl b/lib/ssh/test/ssh_protocol_SUITE.erl
index 3e3e151781..b12ddfeef6 100644
--- a/lib/ssh/test/ssh_protocol_SUITE.erl
+++ b/lib/ssh/test/ssh_protocol_SUITE.erl
@@ -38,7 +38,11 @@
-define(EXTRA_KEX, 'diffie-hellman-group1-sha1').
-define(CIPHERS, ['aes256-ctr','aes192-ctr','aes128-ctr','aes128-cbc','3des-cbc']).
--define(DEFAULT_CIPHERS, [{client2server,?CIPHERS}, {server2client,?CIPHERS}]).
+-define(DEFAULT_CIPHERS, (fun() -> Ciphs = filter_supported(cipher, ?CIPHERS),
+ [{client2server,Ciphs}, {server2client,Ciphs}]
+ end)()
+ ).
+
-define(v(Key, Config), proplists:get_value(Key, Config)).
-define(v(Key, Config, Default), proplists:get_value(Key, Config, Default)).
diff --git a/lib/stdlib/src/io_lib_pretty.erl b/lib/stdlib/src/io_lib_pretty.erl
index 5483ea87b5..8f2fd7ea8f 100644
--- a/lib/stdlib/src/io_lib_pretty.erl
+++ b/lib/stdlib/src/io_lib_pretty.erl
@@ -721,7 +721,7 @@ printable_list(_L, 1, _T, _Enc) ->
printable_list(L, _D, T, latin1) when T < 0 ->
io_lib:printable_latin1_list(L);
printable_list(L, _D, T, Enc) when T >= 0 ->
- case slice(L, tsub(T, 2)) of
+ case slice(L, tsub(T, 2), Enc) of
false ->
false;
{prefix, Prefix} when Enc =:= latin1 ->
@@ -737,20 +737,46 @@ printable_list(L, _D, T, Enc) when T >= 0 ->
printable_list(L, _D, T, _Uni) when T < 0->
io_lib:printable_list(L).
-slice(L, N) ->
- try io_lib:chars_length(L) =< N of
- true ->
+slice(L, N, latin1) ->
+ try lists:split(N, L) of
+ {_, []} ->
all;
- false ->
- case string:slice(L, 0, N) of
- "" ->
- false;
- Prefix ->
- {prefix, Prefix}
+ {[], _} ->
+ false;
+ {L1, _} ->
+ {prefix, L1}
+ catch
+ _:_ ->
+ all
+ end;
+slice(L, N, _Uni) ->
+ %% Be careful not to traverse more of L than necessary.
+ try string:slice(L, 0, N) of
+ "" ->
+ false;
+ Prefix ->
+ %% Assume no binaries are introduced by string:slice().
+ case is_flat(L, lists:flatlength(Prefix)) of
+ true ->
+ case string:equal(Prefix, L) of
+ true ->
+ all;
+ false ->
+ {prefix, Prefix}
+ end;
+ false ->
+ false
end
catch _:_ -> false
end.
+is_flat(_L, 0) ->
+ true;
+is_flat([C|Cs], N) when is_integer(C) ->
+ is_flat(Cs, N - 1);
+is_flat(_, _N) ->
+ false.
+
printable_bin0(Bin, D, T, Enc) ->
Len = case D >= 0 of
true ->
diff --git a/lib/stdlib/test/io_SUITE.erl b/lib/stdlib/test/io_SUITE.erl
index f097552e8c..824f5d19f2 100644
--- a/lib/stdlib/test/io_SUITE.erl
+++ b/lib/stdlib/test/io_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -32,7 +32,7 @@
io_with_huge_message_queue/1, format_string/1,
maps/1, coverage/1, otp_14178_unicode_atoms/1, otp_14175/1,
otp_14285/1, limit_term/1, otp_14983/1, otp_15103/1, otp_15076/1,
- otp_15159/1]).
+ otp_15159/1, otp_15639/1]).
-export([pretty/2, trf/3]).
@@ -64,7 +64,8 @@ all() ->
io_lib_print_binary_depth_one, otp_10302, otp_10755, otp_10836,
io_lib_width_too_small, io_with_huge_message_queue,
format_string, maps, coverage, otp_14178_unicode_atoms, otp_14175,
- otp_14285, limit_term, otp_14983, otp_15103, otp_15076, otp_15159].
+ otp_14285, limit_term, otp_14983, otp_15103, otp_15076, otp_15159,
+ otp_15639].
%% Error cases for output.
error_1(Config) when is_list(Config) ->
@@ -2647,3 +2648,35 @@ otp_15076(_Config) ->
{'EXIT', {badarg, _}} = (catch io_lib:build_text(L)),
{'EXIT', {badarg, _}} = (catch io_lib:build_text(L, [])),
ok.
+
+otp_15639(_Config) ->
+ L = lists:duplicate(10, "a"),
+ LOpts = [{encoding, latin1}, {chars_limit, 10}],
+ UOpts = [{encoding, unicode}, {chars_limit, 10}],
+ "[[...]|...]" = pretty(L, LOpts),
+ "[[...]|...]" = pretty(L, UOpts),
+ "[\"a\",[...]|...]" =
+ pretty(L, [{chars_limit, 12}, {encoding, latin1}]),
+ "[\"a\",[...]|...]" =
+ pretty(L, [{chars_limit, 12}, {encoding, unicode}]),
+
+ %% Latin-1
+ "\"12345678\"" = pretty("12345678", LOpts),
+ "\"12345678\"..." = pretty("123456789", LOpts),
+ "\"\\r\\n123456\"..." = pretty("\r\n1234567", LOpts),
+ "\"\\r1234567\"..." = pretty("\r12345678", LOpts),
+ "\"\\r\\n123456\"..." = pretty("\r\n12345678", LOpts),
+ "\"12345678\"..." = pretty("12345678"++[x], LOpts),
+ "[49,50|...]" = pretty("1234567"++[x], LOpts),
+ "[49,x]" = pretty("1"++[x], LOpts),
+ "[[...]|...]" = pretty(["1","2","3","4","5","6","7","8"], LOpts),
+ %% Unicode
+ "\"12345678\"" = pretty("12345678", UOpts),
+ "\"12345678\"..." = pretty("123456789", UOpts),
+ "\"\\r\\n1234567\"" = pretty("\r\n1234567", UOpts),
+ "\"\\r1234567\"..." = pretty("\r12345678", UOpts),
+ "\"\\r\\n1234567\"..." = pretty("\r\n12345678", UOpts),
+ "[49,50|...]" = pretty("12345678"++[x], UOpts),
+ "\"12345678\"..." = pretty("123456789"++[x], UOpts),
+ "[[...]|...]" = pretty(["1","2","3","4","5","6","7","8"], UOpts),
+ ok.
diff --git a/lib/tools/c_src/Makefile.in b/lib/tools/c_src/Makefile.in
index cfe91917f8..289322b6fa 100644
--- a/lib/tools/c_src/Makefile.in
+++ b/lib/tools/c_src/Makefile.in
@@ -29,7 +29,6 @@ CC=@CC@
LD=@LD@
AR=@AR@
RANLIB=@RANLIB@
-RM=@RM@
MKDIR=@MKDIR@
INSTALL=@INSTALL@
INSTALL_DIR=@INSTALL_DIR@
@@ -158,9 +157,9 @@ $(ERTS_LIB):
docs:
clean:
- $(RM) -rf ../obj/*
- $(RM) -rf ../bin/*
- $(RM) -f ./*~
+ $(RM) -r ../obj/*
+ $(RM) -r ../bin/*
+ $(RM) ./*~
.PHONY: all erts_lib docs clean
diff --git a/make/otp.mk.in b/make/otp.mk.in
index fb573680c8..ceff8f7c31 100644
--- a/make/otp.mk.in
+++ b/make/otp.mk.in
@@ -274,7 +274,6 @@ DEFAULT_GIF_FILES = $(HTMLDIR)/min_head.gif
XSLTPROC = @XSLTPROC@
FOP = @FOP@
XMLLINT = @XMLLINT@
-RM = @RM@
CP = @CP@
MKDIR = @MKDIR@
diff --git a/otp_build b/otp_build
index b1c5bcc939..3dfe24a299 100755
--- a/otp_build
+++ b/otp_build
@@ -30,7 +30,7 @@ AUTOCONF_SUBDIRS="$AUTOCONF_SUBDIRS make erts"
# partly built in one of the bootstrap phases. Applications that
# only get some static includes copied into the bootstrap directory
# should not be included.
-bootstrap_apps="erts lib/asn1 lib/compiler lib/hipe lib/ic lib/kernel lib/parsetools lib/sasl lib/snmp lib/stdlib lib/syntax_tools"
+bootstrap_apps="erts lib/asn1 lib/compiler lib/erl_interface lib/hipe lib/kernel lib/jinterface lib/parsetools lib/sasl lib/snmp lib/stdlib lib/syntax_tools lib/wx"
# We will quote a bit more than needed, but the important thing is that
# all that needs quoting will be quoted...
diff --git a/otp_versions.table b/otp_versions.table
index aea1e8d5cd..0f79284903 100644
--- a/otp_versions.table
+++ b/otp_versions.table
@@ -1,3 +1,4 @@
+OTP-21.2.7 : erts-10.2.5 kernel-6.2.1 # asn1-5.0.8 common_test-1.16.1 compiler-7.3.1 crypto-4.4 debugger-4.2.6 dialyzer-3.3.1 diameter-2.1.6 edoc-0.9.4 eldap-1.2.6 erl_docgen-0.8.1 erl_interface-3.10.4 et-1.6.4 eunit-2.3.7 ftp-1.0.1 hipe-3.18.2 inets-7.0.5 jinterface-1.9.1 megaco-3.18.4 mnesia-4.15.5 observer-2.8.2 odbc-2.12.2 os_mon-2.4.7 otp_mibs-1.2.1 parsetools-2.1.8 public_key-1.6.4 reltool-0.7.8 runtime_tools-1.13.1 sasl-3.3 snmp-5.2.12 ssh-4.7.3 ssl-9.1.2 stdlib-3.7.1 syntax_tools-2.1.6 tftp-1.0.1 tools-3.0.2 wx-1.8.6 xmerl-1.3.19 :
OTP-21.2.6 : erts-10.2.4 stdlib-3.7.1 # asn1-5.0.8 common_test-1.16.1 compiler-7.3.1 crypto-4.4 debugger-4.2.6 dialyzer-3.3.1 diameter-2.1.6 edoc-0.9.4 eldap-1.2.6 erl_docgen-0.8.1 erl_interface-3.10.4 et-1.6.4 eunit-2.3.7 ftp-1.0.1 hipe-3.18.2 inets-7.0.5 jinterface-1.9.1 kernel-6.2 megaco-3.18.4 mnesia-4.15.5 observer-2.8.2 odbc-2.12.2 os_mon-2.4.7 otp_mibs-1.2.1 parsetools-2.1.8 public_key-1.6.4 reltool-0.7.8 runtime_tools-1.13.1 sasl-3.3 snmp-5.2.12 ssh-4.7.3 ssl-9.1.2 syntax_tools-2.1.6 tftp-1.0.1 tools-3.0.2 wx-1.8.6 xmerl-1.3.19 :
OTP-21.2.5 : inets-7.0.5 # asn1-5.0.8 common_test-1.16.1 compiler-7.3.1 crypto-4.4 debugger-4.2.6 dialyzer-3.3.1 diameter-2.1.6 edoc-0.9.4 eldap-1.2.6 erl_docgen-0.8.1 erl_interface-3.10.4 erts-10.2.3 et-1.6.4 eunit-2.3.7 ftp-1.0.1 hipe-3.18.2 jinterface-1.9.1 kernel-6.2 megaco-3.18.4 mnesia-4.15.5 observer-2.8.2 odbc-2.12.2 os_mon-2.4.7 otp_mibs-1.2.1 parsetools-2.1.8 public_key-1.6.4 reltool-0.7.8 runtime_tools-1.13.1 sasl-3.3 snmp-5.2.12 ssh-4.7.3 ssl-9.1.2 stdlib-3.7 syntax_tools-2.1.6 tftp-1.0.1 tools-3.0.2 wx-1.8.6 xmerl-1.3.19 :
OTP-21.2.4 : erts-10.2.3 inets-7.0.4 # asn1-5.0.8 common_test-1.16.1 compiler-7.3.1 crypto-4.4 debugger-4.2.6 dialyzer-3.3.1 diameter-2.1.6 edoc-0.9.4 eldap-1.2.6 erl_docgen-0.8.1 erl_interface-3.10.4 et-1.6.4 eunit-2.3.7 ftp-1.0.1 hipe-3.18.2 jinterface-1.9.1 kernel-6.2 megaco-3.18.4 mnesia-4.15.5 observer-2.8.2 odbc-2.12.2 os_mon-2.4.7 otp_mibs-1.2.1 parsetools-2.1.8 public_key-1.6.4 reltool-0.7.8 runtime_tools-1.13.1 sasl-3.3 snmp-5.2.12 ssh-4.7.3 ssl-9.1.2 stdlib-3.7 syntax_tools-2.1.6 tftp-1.0.1 tools-3.0.2 wx-1.8.6 xmerl-1.3.19 :
diff --git a/system/doc/design_principles/statem.xml b/system/doc/design_principles/statem.xml
index 07b4284cb8..45ea972ff2 100644
--- a/system/doc/design_principles/statem.xml
+++ b/system/doc/design_principles/statem.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2016</year><year>2018</year>
+ <year>2016</year><year>2019</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -2415,17 +2415,18 @@ handle_event(enter, _OldState, {open,_}, _Data) ->
<seealso marker="#Event Time-Outs">event time-out</seealso>
to trigger hibernation after a certain time of inactivity.
There is also a server start option
- <seealso marker="stdlib:gen_statem#type-hibernate_after_opt">
+ <seealso marker="stdlib:gen_statem#type-enter_loop_opt">
<c>{hibernate_after, Timeout}</c>
</seealso>
for
- <seealso marker="stdlib:gen_statem#start/3">
- <c>start/3,4</c>
- </seealso>
- or
+ <seealso marker="stdlib:gen_statem#start/3"><c>start/3,4</c></seealso>,
<seealso marker="stdlib:gen_statem#start_link/3">
<c>start_link/3,4</c>
</seealso>
+ or
+ <seealso marker="stdlib:gen_statem#enter_loop/4">
+ <c>enter_loop/4,5,6</c>
+ </seealso>
that may be used to automatically hibernate the server.
</p>
<p>
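For reference, a minimal sketch of passing this option at start, reusing the chapter's code_lock example (the exact arguments are assumptions; {hibernate_after, Timeout} goes in the options list that is the last argument of start_link/4):

    %% Hibernate the gen_statem after 10 seconds without traffic.
    {ok, Pid} = gen_statem:start_link({local, code_lock}, code_lock, [1,2,3,4],
                                      [{hibernate_after, 10000}]).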
diff --git a/system/doc/general_info/deprecations.xml b/system/doc/general_info/deprecations.xml
index ccfd553bad..c748e60059 100644
--- a/system/doc/general_info/deprecations.xml
+++ b/system/doc/general_info/deprecations.xml
@@ -61,6 +61,27 @@
library has also been <seealso marker="scheduled_for_removal#OTP-23">scheduled
for removal</seealso>.</p>
</section>
+ <section>
+ <title>System Events</title>
+ <p>
+ The format of "System Events" as defined in the man page for
+ <seealso marker="stdlib:sys">sys</seealso>
+ has been clarified and cleaned up.
+      Because of this, code that relied on the previous, internal and
+      poorly documented format of OTP's "System Events"
+      needs to be changed.
+ </p>
+ <p>
+      As a consequence, the function
+      <seealso marker="stdlib:sys#get_debug/3">sys:get_debug/3</seealso>,
+      which returns data in an undocumented, internal format
+      (and is therefore of little practical use), has been deprecated.
+      A new function,
+      <seealso marker="stdlib:sys#get_log/1">sys:get_log/1</seealso>,
+      has been added as its intended replacement.
+ </p>
+ </section>
</section>
<section>
<marker id="OTP-18"/>
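A rough sketch of the migration for a special process that keeps a sys debug structure (print_event/3 stands for the caller's own format function and is made up; the sketch assumes sys:get_log/1 takes the debug structure, just as the deprecated sys:get_debug/3 did):

    Debug0 = sys:debug_options([log]),
    Debug  = sys:handle_debug(Debug0, fun print_event/3, my_state, {in, hello}),
    %% Old (deprecated): sys:get_debug(log, Debug, [])  -- undocumented internal format
    Events = sys:get_log(Debug).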
diff --git a/system/doc/top/Makefile b/system/doc/top/Makefile
index a978413e11..e3f9c4710a 100644
--- a/system/doc/top/Makefile
+++ b/system/doc/top/Makefile
@@ -264,13 +264,13 @@ html: $(INDEX_FILES) \
debug opt:
clean:
- $(RM) -f ../html/js/*.js
- $(RM) -f PR.template
- $(RM) -f $(XMLDIR)/*.xml
- $(RM) -f $(INDEX_FILES) $(MAN_INDEX)
- $(RM) -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
- $(RM) -f $(INDEX_SCRIPT) $(GLOSSARY_SCRIPT) $(JAVASCRIPT_BUILD_SCRIPT)
- $(RM) -f erl_crash.dump errs core *~
+ $(RM) ../html/js/*.js
+ $(RM) PR.template
+ $(RM) $(XMLDIR)/*.xml
+ $(RM) $(INDEX_FILES) $(MAN_INDEX)
+ $(RM) $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
+ $(RM) $(INDEX_SCRIPT) $(GLOSSARY_SCRIPT) $(JAVASCRIPT_BUILD_SCRIPT)
+ $(RM) erl_crash.dump errs core *~
# ----------------------------------------------------
# Release Target
@@ -294,7 +294,7 @@ release_docs_spec: docs
$(INSTALL_DATA) $(INDEX_SCRIPT) $(MAN_INDEX_SCRIPT) $(JAVASCRIPT_BUILD_SCRIPT) \
$(INDEX_SRC) $(MAN_INDEX_SRC) $(JAVASCRIPT_BUILD_SCRIPT_SRC) \
$(TEMPLATES) $(RELSYSDIR)/docbuild
- $(RM) -rf $(RELSYSDIR)/temporary
+ $(RM) -r $(RELSYSDIR)/temporary
release_spec: