-rw-r--r--  erts/emulator/beam/arith_instrs.tab | 11
-rw-r--r--  erts/emulator/beam/beam_emu.c | 14
-rw-r--r--  erts/emulator/beam/beam_load.c | 119
-rw-r--r--  erts/emulator/beam/bif_instrs.tab | 4
-rw-r--r--  erts/emulator/beam/bs_instrs.tab | 363
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 10
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 12
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 14
-rw-r--r--  erts/emulator/beam/instrs.tab | 21
-rw-r--r--  erts/emulator/beam/ops.tab | 134
-rw-r--r--  erts/emulator/beam/sys.h | 17
-rwxr-xr-x  erts/emulator/utils/beam_makeops | 2
-rw-r--r--  lib/crypto/c_src/atoms.c | 24
-rw-r--r--  lib/crypto/c_src/atoms.h | 12
-rw-r--r--  lib/crypto/c_src/cipher.c | 93
-rw-r--r--  lib/crypto/c_src/cipher.h | 4
-rw-r--r--  lib/crypto/c_src/crypto.c | 2
-rw-r--r--  lib/crypto/c_src/hash.c | 26
-rw-r--r--  lib/crypto/c_src/hash.h | 1
-rw-r--r--  lib/crypto/src/crypto.erl | 15
-rw-r--r--  lib/crypto/test/crypto_SUITE.erl | 21
-rw-r--r--  lib/diameter/doc/src/diameter.xml | 24
-rw-r--r--  lib/diameter/doc/src/diameter_transport.xml | 31
-rw-r--r--  lib/diameter/src/base/diameter.erl | 4
-rw-r--r--  lib/diameter/src/base/diameter_callback.erl | 4
-rw-r--r--  lib/diameter/src/base/diameter_dist.erl | 525
-rw-r--r--  lib/diameter/src/base/diameter_misc_sup.erl | 3
-rw-r--r--  lib/diameter/src/base/diameter_traffic.erl | 97
-rw-r--r--  lib/diameter/src/diameter.appup.src | 6
-rw-r--r--  lib/diameter/src/modules.mk | 3
-rw-r--r--  lib/diameter/test/diameter_dist_SUITE.erl | 332
-rw-r--r--  lib/diameter/test/diameter_distribution_SUITE.erl | 7
-rw-r--r--  lib/diameter/test/diameter_pool_SUITE.erl | 3
-rw-r--r--  lib/diameter/test/diameter_traffic_SUITE.erl | 5
-rw-r--r--  lib/diameter/test/modules.mk | 3
-rw-r--r--  lib/ftp/test/ftp_SUITE.erl | 38
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml | 18
-rw-r--r--  lib/kernel/doc/src/logger.xml | 2
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 14
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml | 122
-rw-r--r--  lib/kernel/src/kernel.erl | 72
-rw-r--r--  lib/kernel/src/logger_h_common.erl | 35
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 503
-rw-r--r--  lib/kernel/test/kernel_config_SUITE.erl | 21
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 14
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 428
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 5
-rw-r--r--  lib/stdlib/test/ets_SUITE.erl | 57
48 files changed, 2763 insertions, 532 deletions
diff --git a/erts/emulator/beam/arith_instrs.tab b/erts/emulator/beam/arith_instrs.tab
index 574fceec5b..5f23b2c168 100644
--- a/erts/emulator/beam/arith_instrs.tab
+++ b/erts/emulator/beam/arith_instrs.tab
@@ -116,6 +116,17 @@ increment.execute(IncrementVal, Dst) {
i_times(Fail, Op1, Op2, Dst) {
Eterm op1 = $Op1;
Eterm op2 = $Op2;
+#ifdef HAVE_OVERFLOW_CHECK_BUILTINS
+ if (ERTS_LIKELY(is_both_small(op1, op2))) {
+ Sint a = signed_val(op1);
+ Sint b = signed_val(op2);
+ Sint res;
+ if (ERTS_LIKELY(!__builtin_mul_overflow(a, b, &res) && IS_SSMALL(res))) {
+ $Dst = make_small(res);
+ $NEXT0();
+ }
+ }
+#endif
$OUTLINED_ARITH_2($Fail, mixed_times, BIF_stimes_2, op1, op2, $Dst);
}
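The fast path added to i_times can be sketched outside the emulator roughly as follows (illustrative names only; SMALL_BITS and the IS_SSMALL range check stand in for the ERTS tagging helpers, and a GCC/clang-style compiler with __builtin_mul_overflow is assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the ERTS small-integer range check: on a 64-bit word,
     * a "small" is assumed to carry roughly 60 payload bits. */
    #define SMALL_BITS 60
    #define IS_SSMALL(x) ((x) >= -(1LL << (SMALL_BITS - 1)) && \
                          (x) <  (1LL << (SMALL_BITS - 1)))

    /* Multiply two untagged smalls; return 1 and store the product when it
     * still fits in a small, 0 when the caller must take the slow (bignum)
     * path -- the same shape as the HAVE_OVERFLOW_CHECK_BUILTINS branch. */
    static int small_times(int64_t a, int64_t b, int64_t *res)
    {
        return !__builtin_mul_overflow(a, b, res) && IS_SSMALL(*res);
    }

    int main(void)
    {
        int64_t r;
        printf("%d\n", small_times(123456789, 97, &r));      /* 1: fast path */
        printf("%d\n", small_times(INT64_MAX / 2, 8, &r));   /* 0: overflow */
        return 0;
    }

The point of the builtin is that the common small-by-small case never has to enter the generic mixed_times slow path.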
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 90162a6543..04a2a83123 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -322,19 +322,19 @@ void** beam_ops;
#define Arg(N) I[(N)+1]
-#define GetR(pos, tr) \
+#define GetSource(raw, dst) \
do { \
- tr = Arg(pos); \
- switch (loader_tag(tr)) { \
+ dst = raw; \
+ switch (loader_tag(dst)) { \
case LOADER_X_REG: \
- tr = x(loader_x_reg_index(tr)); \
+ dst = x(loader_x_reg_index(dst)); \
break; \
case LOADER_Y_REG: \
- ASSERT(loader_y_reg_index(tr) >= 1); \
- tr = y(loader_y_reg_index(tr)); \
+ ASSERT(loader_y_reg_index(dst) >= 1); \
+ dst = y(loader_y_reg_index(dst)); \
break; \
} \
- CHECK_TERM(tr); \
+ CHECK_TERM(dst); \
} while (0)
#define PUT_TERM_REG(term, desc) \
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index f4eeb54a1b..43adf7a5e0 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -3295,11 +3295,11 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
} else {
op->op = genop_i_bs_get_integer_6;
op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Live;
- op->a[2].type = TAG_u;
- op->a[2].val = (Unit.val << 3) | Flags.val;
- op->a[3] = Ms;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
+ op->a[2] = Live;
+ op->a[3].type = TAG_u;
+ op->a[3].val = (Unit.val << 3) | Flags.val;
op->a[4] = Size;
op->a[5] = Dst;
op->next = NULL;
@@ -3332,8 +3332,8 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
} else {
op->op = genop_i_bs_get_binary_all2_5;
op->arity = 5;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3] = Unit;
op->a[4] = Dst;
@@ -3341,8 +3341,8 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
} else if (Size.type == TAG_i) {
op->op = genop_i_bs_get_binary_imm2_6;
op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3].type = TAG_u;
if (!safe_mul(Size.val, Unit.val, &op->a[3].val)) {
@@ -3362,8 +3362,8 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
} else {
op->op = genop_i_bs_get_binary_imm2_6;
op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3].type = TAG_u;
if (!safe_mul(bigval, Unit.val, &op->a[3].val)) {
@@ -3375,8 +3375,8 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
} else {
op->op = genop_i_bs_get_binary2_6;
op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3] = Size;
op->a[4].type = TAG_u;
@@ -3409,8 +3409,8 @@ gen_put_binary(LoaderState* stp, GenOpArg Fail,GenOpArg Size,
if (Size.type == TAG_a && Size.val == am_all) {
op->op = genop_i_new_bs_put_binary_all_3;
op->arity = 3;
- op->a[0] = Fail;
- op->a[1] = Src;
+ op->a[0] = Src;
+ op->a[1] = Fail;
op->a[2] = Unit;
} else if (Size.type == TAG_i) {
op->op = genop_i_new_bs_put_binary_imm_3;
@@ -3420,10 +3420,33 @@ gen_put_binary(LoaderState* stp, GenOpArg Fail,GenOpArg Size,
if (safe_mul(Size.val, Unit.val, &op->a[1].val)) {
op->a[2] = Src;
} else {
+ error:
op->op = genop_badarg_1;
op->arity = 1;
op->a[0] = Fail;
}
+ } else if (Size.type == TAG_q) {
+#ifdef ARCH_64
+ /*
+ * There is no way that this binary would fit in memory.
+ */
+ goto error;
+#else
+ Eterm big = stp->literals[Size.val].term;
+ Uint bigval;
+ Uint size;
+
+ if (!term_to_Uint(big, &bigval) ||
+ !safe_mul(bigval, Unit.val, &size)) {
+ goto error;
+ }
+ op->op = genop_i_new_bs_put_binary_imm_3;
+ op->arity = 3;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ op->a[1].val = size;
+ op->a[2] = Src;
+#endif
} else {
op->op = genop_i_new_bs_put_binary_4;
op->arity = 4;
@@ -3448,11 +3471,8 @@ gen_put_integer(LoaderState* stp, GenOpArg Fail, GenOpArg Size,
NATIVE_ENDIAN(Flags);
/* Negative size must fail */
if (Size.type == TAG_i) {
- op->op = genop_i_new_bs_put_integer_imm_4;
- op->arity = 4;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
- if (!safe_mul(Size.val, Unit.val, &op->a[1].val)) {
+ Uint size;
+ if (!safe_mul(Size.val, Unit.val, &size)) {
error:
op->op = genop_badarg_1;
op->arity = 1;
@@ -3460,26 +3480,31 @@ gen_put_integer(LoaderState* stp, GenOpArg Fail, GenOpArg Size,
op->next = NULL;
return op;
}
- op->a[1].val = Size.val * Unit.val;
- op->a[2].type = Flags.type;
- op->a[2].val = (Flags.val & 7);
- op->a[3] = Src;
+ op->op = genop_i_new_bs_put_integer_imm_4;
+ op->arity = 4;
+ op->a[0] = Src;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = size;
+ op->a[3].type = Flags.type;
+ op->a[3].val = (Flags.val & 7);
} else if (Size.type == TAG_q) {
Eterm big = stp->literals[Size.val].term;
Uint bigval;
+ Uint size;
- if (!term_to_Uint(big, &bigval)) {
+ if (!term_to_Uint(big, &bigval) ||
+ !safe_mul(bigval, Unit.val, &size)) {
goto error;
- } else {
- op->op = genop_i_new_bs_put_integer_imm_4;
- op->arity = 4;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
- op->a[1].val = bigval * Unit.val;
- op->a[2].type = Flags.type;
- op->a[2].val = (Flags.val & 7);
- op->a[3] = Src;
}
+ op->op = genop_i_new_bs_put_integer_imm_4;
+ op->arity = 4;
+ op->a[0] = Src;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = size;
+ op->a[3].type = Flags.type;
+ op->a[3].val = (Flags.val & 7);
} else {
op->op = genop_i_new_bs_put_integer_4;
op->arity = 4;
@@ -3541,8 +3566,8 @@ gen_get_float2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
NATIVE_ENDIAN(Flags);
op->op = genop_i_bs_get_float2_6;
op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3] = Size;
op->a[4].type = TAG_u;
@@ -3565,10 +3590,22 @@ gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
NATIVE_ENDIAN(Flags);
NEW_GENOP(stp, op);
if (Size.type == TAG_a && Size.val == am_all) {
- op->op = genop_i_bs_skip_bits_all2_3;
+ /*
+ * This kind of skip instruction will only be found in modules
+ * compiled before OTP 19. From OTP 19, the compiler generates
+ * a test_unit instruction instead of a bs_skip at the end of a
+ * binary.
+ *
+ * It is safe to replace the skip instruction with a test_unit
+ * instruction, because the position will never be used again.
+ * If the match context itself is used again, it will be used by
+ * a bs_restore2 instruction which will overwrite the position
+ * by one of the stored positions.
+ */
+ op->op = genop_bs_test_unit_3;
op->arity = 3;
op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[1] = Ms;
op->a[2] = Unit;
} else if (Size.type == TAG_i) {
op->op = genop_i_bs_skip_bits_imm2_3;
@@ -3601,9 +3638,9 @@ gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
} else {
op->op = genop_i_bs_skip_bits2_4;
op->arity = 4;
- op->a[0] = Fail;
- op->a[1] = Ms;
- op->a[2] = Size;
+ op->a[0] = Ms;
+ op->a[1] = Size;
+ op->a[2] = Fail;
op->a[3] = Unit;
}
op->next = NULL;
diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab
index 418bbe2b23..8499f61114 100644
--- a/erts/emulator/beam/bif_instrs.tab
+++ b/erts/emulator/beam/bif_instrs.tab
@@ -269,7 +269,7 @@ call_bif(Exp) {
CHECK_TERM(r(0));
$NEXT0();
} else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+2);
+ SET_CP(c_p, $NEXT_INSTRUCTION);
SET_I(c_p->i);
SWAPIN;
Dispatch();
@@ -313,7 +313,7 @@ send() {
r(0) = result;
CHECK_TERM(r(0));
} else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+1);
+ SET_CP(c_p, $NEXT_INSTRUCTION);
SET_I(c_p->i);
SWAPIN;
Dispatch();
diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab
index 2dde70c2e1..652460a66d 100644
--- a/erts/emulator/beam/bs_instrs.tab
+++ b/erts/emulator/beam/bs_instrs.tab
@@ -21,12 +21,57 @@
%if ARCH_64
BS_SAFE_MUL(A, B, Fail, Dst) {
- Uint64 res = ($A) * ($B);
- if (res / $B != $A) {
+ Uint a = $A;
+ Uint b = $B;
+ Uint res;
+#ifdef HAVE_OVERFLOW_CHECK_BUILTINS
+ if (__builtin_mul_overflow(a, b, &res)) {
+ $Fail;
+ }
+#else
+ res = a * b;
+ if (res / b != a) {
$Fail;
}
+#endif
$Dst = res;
}
+
+BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ if (is_small($Bits)) {
+ Uint uint_size;
+ Sint signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ $BS_SAFE_MUL(uint_size, $Unit, $Fail, $Dst);
+ } else {
+ /*
+ * On a 64-bit architecture, the size of any binary
+ * that would fit in memory fits in a small.
+ */
+ $Fail;
+ }
+}
+
+BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ if (is_small($Bits)) {
+ Uint uint_size;
+ Sint signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ $Dst = uint_size * $Unit;
+ } else {
+ /*
+ * On a 64-bit architecture, the size of any binary
+ * that would fit in memory fits in a small.
+ */
+ $Fail;
+ }
+}
%else
BS_SAFE_MUL(A, B, Fail, Dst) {
Uint64 res = (Uint64)($A) * (Uint64)($B);
@@ -35,7 +80,6 @@ BS_SAFE_MUL(A, B, Fail, Dst) {
}
$Dst = res;
}
-%endif
BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) {
Sint signed_size;
@@ -76,6 +120,7 @@ BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) {
}
$Dst = uint_size * $Unit;
}
+%endif
TEST_BIN_VHEAP(VNh, Nh, Live) {
Uint need = $Nh;
@@ -90,12 +135,22 @@ TEST_BIN_VHEAP(VNh, Nh, Live) {
HEAP_SPACE_VERIFIED(need);
}
-i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) {
+i_bs_get_binary_all2 := i_bs_get_binary_all2.fetch.execute;
+
+i_bs_get_binary_all2.head() {
+ Eterm context;
+}
+
+i_bs_get_binary_all2.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_binary_all2.execute(Fail, Live, Unit, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
- $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context);
+ _mb = ms_matchbuffer(context);
if (((_mb->size - _mb->offset) % $Unit) == 0) {
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_all_2(c_p, _mb);
@@ -109,14 +164,23 @@ i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) {
$FAIL($Fail);
}
}
+i_bs_get_binary2 := i_bs_get_binary2.fetch.execute;
+
+i_bs_get_binary2.head() {
+ Eterm context;
+}
+
+i_bs_get_binary2.fetch(Ctx) {
+ context = $Ctx;
+}
-i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) {
+i_bs_get_binary2.execute(Fail, Live, Sz, Flags, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
Uint _size;
$BS_GET_FIELD_SIZE($Sz, (($Flags) >> 3), $FAIL($Fail), _size);
- $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context);
+ _mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_2(c_p, _size, $Flags, _mb);
LIGHT_SWAPIN;
@@ -129,11 +193,22 @@ i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) {
}
}
-i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) {
+i_bs_get_binary_imm2 := i_bs_get_binary_imm2.fetch.execute;
+
+i_bs_get_binary_imm2.head() {
+ Eterm context;
+}
+
+i_bs_get_binary_imm2.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_binary_imm2.execute(Fail, Live, Sz, Flags, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
- $GC_TEST(0, heap_bin_size(ERL_ONHEAP_BIN_LIMIT), $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(heap_bin_size(ERL_ONHEAP_BIN_LIMIT),
+ $Live, context);
+ _mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_2(c_p, $Sz, $Flags, _mb);
LIGHT_SWAPIN;
@@ -145,8 +220,17 @@ i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) {
$Dst = _result;
}
}
+i_bs_get_float2 := i_bs_get_float2.fetch.execute;
+
+i_bs_get_float2.head() {
+ Eterm context;
+}
+
+i_bs_get_float2.fetch(Ctx) {
+ context = $Ctx;
+}
-i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
+i_bs_get_float2.execute(Fail, Live, Sz, Flags, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
Sint _size;
@@ -155,8 +239,8 @@ i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
$FAIL($Fail);
}
_size *= (($Flags) >> 3);
- $GC_TEST(0, FLOAT_SIZE_OBJECT, $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(FLOAT_SIZE_OBJECT, $Live, context);
+ _mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_float_2(c_p, _size, ($Flags), _mb);
LIGHT_SWAPIN;
@@ -169,13 +253,24 @@ i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
}
}
-i_bs_skip_bits2(Fail, Ms, Bits, Unit) {
+i_bs_skip_bits2 := i_bs_skip_bits2.fetch.execute;
+
+i_bs_skip_bits2.head() {
+ Eterm context, bits;
+}
+
+i_bs_skip_bits2.fetch(Ctx, Bits) {
+ context = $Ctx;
+ bits = $Bits;
+}
+
+i_bs_skip_bits2.execute(Fail, Unit) {
ErlBinMatchBuffer *_mb;
size_t new_offset;
Uint _size;
- _mb = ms_matchbuffer($Ms);
- $BS_GET_FIELD_SIZE($Bits, $Unit, $FAIL($Fail), _size);
+ _mb = ms_matchbuffer(context);
+ $BS_GET_FIELD_SIZE(bits, $Unit, $FAIL($Fail), _size);
new_offset = _mb->offset + _size;
if (new_offset <= _mb->size) {
_mb->offset = new_offset;
@@ -184,16 +279,6 @@ i_bs_skip_bits2(Fail, Ms, Bits, Unit) {
}
}
-i_bs_skip_bits_all2(Fail, Ms, Unit) {
- ErlBinMatchBuffer *_mb;
- _mb = ms_matchbuffer($Ms);
- if (((_mb->size - _mb->offset) % $Unit) == 0) {
- _mb->offset = _mb->size;
- } else {
- $FAIL($Fail);
- }
-}
-
i_bs_skip_bits_imm2(Fail, Ms, Bits) {
ErlBinMatchBuffer *_mb;
size_t new_offset;
@@ -207,15 +292,25 @@ i_bs_skip_bits_imm2(Fail, Ms, Bits) {
}
i_new_bs_put_binary(Fail, Sz, Flags, Src) {
+ Eterm sz = $Sz;
Sint _size;
- $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ $BS_GET_UNCHECKED_FIELD_SIZE(sz, (($Flags) >> 3), $BADARG($Fail), _size);
if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), _size))) {
$BADARG($Fail);
}
}
+i_new_bs_put_binary_all := i_new_bs_put_binary_all.fetch.execute;
+
+i_new_bs_put_binary_all.head() {
+ Eterm src;
+}
+
+i_new_bs_put_binary_all.fetch(Src) {
+ src = $Src;
+}
-i_new_bs_put_binary_all(Fail, Src, Unit) {
- if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(($Src), ($Unit)))) {
+i_new_bs_put_binary_all.execute(Fail, Unit) {
+ if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(src, ($Unit)))) {
$BADARG($Fail);
}
}
@@ -227,9 +322,11 @@ i_new_bs_put_binary_imm(Fail, Sz, Src) {
}
i_new_bs_put_float(Fail, Sz, Flags, Src) {
+ Eterm sz = $Sz;
+ Eterm flags = $Flags;
Sint _size;
- $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
- if (!erts_new_bs_put_float(c_p, ($Src), _size, ($Flags))) {
+ $BS_GET_UNCHECKED_FIELD_SIZE(sz, (flags >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_float(c_p, ($Src), _size, flags)) {
$BADARG($Fail);
}
}
@@ -241,15 +338,27 @@ i_new_bs_put_float_imm(Fail, Sz, Flags, Src) {
}
i_new_bs_put_integer(Fail, Sz, Flags, Src) {
- Sint _size;
- $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, ($Flags)))) {
- $BADARG($Fail);
- }
+ Eterm sz = $Sz;
+ Eterm flags = $Flags;
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE(sz, (flags >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, flags))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_integer_imm := i_new_bs_put_integer_imm.fetch.execute;
+
+i_new_bs_put_integer_imm.head() {
+ Eterm src;
}
-i_new_bs_put_integer_imm(Fail, Sz, Flags, Src) {
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), ($Sz), ($Flags)))) {
+i_new_bs_put_integer_imm.fetch(Src) {
+ src = $Src;
+}
+
+i_new_bs_put_integer_imm.execute(Fail, Sz, Flags) {
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(src, ($Sz), ($Flags)))) {
$BADARG($Fail);
}
}
@@ -809,9 +918,19 @@ bs_test_unit8(Fail, Ctx) {
}
}
-i_bs_get_integer_8(Ctx, Fail, Dst) {
+i_bs_get_integer_8 := i_bs_get_integer_8.fetch.execute;
+
+i_bs_get_integer_8.head() {
+ Eterm context;
+}
+
+i_bs_get_integer_8.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer_8.execute(Fail, Dst) {
Eterm _result;
- ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* _mb = ms_matchbuffer(context);
if (_mb->size - _mb->offset < 8) {
$FAIL($Fail);
@@ -825,9 +944,19 @@ i_bs_get_integer_8(Ctx, Fail, Dst) {
$Dst = _result;
}
-i_bs_get_integer_16(Ctx, Fail, Dst) {
+i_bs_get_integer_16 := i_bs_get_integer_16.fetch.execute;
+
+i_bs_get_integer_16.head() {
+ Eterm context;
+}
+
+i_bs_get_integer_16.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer_16.execute(Fail, Dst) {
Eterm _result;
- ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* _mb = ms_matchbuffer(context);
if (_mb->size - _mb->offset < 16) {
$FAIL($Fail);
@@ -842,9 +971,19 @@ i_bs_get_integer_16(Ctx, Fail, Dst) {
}
%if ARCH_64
-i_bs_get_integer_32(Ctx, Fail, Dst) {
+i_bs_get_integer_32 := i_bs_get_integer_32.fetch.execute;
+
+i_bs_get_integer_32.head() {
+ Eterm context;
+}
+
+i_bs_get_integer_32.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer_32.execute(Fail, Dst) {
Uint32 _integer;
- ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* _mb = ms_matchbuffer(context);
if (_mb->size - _mb->offset < 32) {
$FAIL($Fail);
@@ -894,15 +1033,23 @@ bs_get_integer.execute(Fail, Flags, Dst) {
$Dst = result;
}
-i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
+i_bs_get_integer := i_bs_get_integer.fetch.execute;
+
+i_bs_get_integer.head() {
+ Eterm context;
+}
+
+i_bs_get_integer.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer.execute(Fail, Live, FlagsAndUnit, Sz, Dst) {
Uint flags;
Uint size;
- Eterm ms;
ErlBinMatchBuffer* mb;
Eterm result;
flags = $FlagsAndUnit;
- ms = $Ms;
$BS_GET_FIELD_SIZE($Sz, (flags >> 3), $FAIL($Fail), size);
if (size >= SMALL_BITS) {
Uint wordsneeded;
@@ -913,15 +1060,15 @@ i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
* Remember to re-acquire the matchbuffer after gc.
*/
- mb = ms_matchbuffer(ms);
+ mb = ms_matchbuffer(context);
if (mb->size - mb->offset < size) {
$FAIL($Fail);
}
wordsneeded = 1+WSIZE(NBYTES((Uint) size));
- $GC_TEST_PRESERVE(wordsneeded, $Live, ms);
+ $GC_TEST_PRESERVE(wordsneeded, $Live, context);
$REFRESH_GEN_DEST();
}
- mb = ms_matchbuffer(ms);
+ mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
result = erts_bs_get_integer_2(c_p, size, flags, mb);
LIGHT_SWAPIN;
@@ -932,9 +1079,19 @@ i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
$Dst = result;
}
-i_bs_get_utf8(Ctx, Fail, Dst) {
+i_bs_get_utf8 := i_bs_get_utf8.fetch.execute;
+
+i_bs_get_utf8.head() {
+ Eterm context;
+}
+
+i_bs_get_utf8.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_utf8.execute(Fail, Dst) {
Eterm result;
- ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* mb = ms_matchbuffer(context);
if (mb->size - mb->offset < 8) {
$FAIL($Fail);
@@ -957,8 +1114,18 @@ i_bs_get_utf8(Ctx, Fail, Dst) {
$Dst = result;
}
-i_bs_get_utf16(Ctx, Fail, Flags, Dst) {
- ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+i_bs_get_utf16 := i_bs_get_utf16.fetch.execute;
+
+i_bs_get_utf16.head() {
+ Eterm context;
+}
+
+i_bs_get_utf16.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_utf16.execute(Fail, Flags, Dst) {
+ ErlBinMatchBuffer* mb = ms_matchbuffer(context);
Eterm result = erts_bs_get_utf16(mb, $Flags);
if (is_non_value(result)) {
@@ -1055,13 +1222,20 @@ i_bs_restore2(Src, Slot) {
_ms->mb.offset = _ms->save_offset[$Slot];
}
-bs_get_tail(Src, Dst, Live) {
- ErlBinMatchBuffer* mb;
- Uint size, offs;
- ErlSubBin* sb;
+bs_get_tail := bs_get_tail.fetch.execute;
+
+bs_get_tail.head() {
Eterm context;
+}
+bs_get_tail.fetch(Src) {
context = $Src;
+}
+
+bs_get_tail.execute(Dst, Live) {
+ ErlBinMatchBuffer* mb;
+ Uint size, offs;
+ ErlSubBin* sb;
ASSERT(header_is_bin_matchstate(*boxed_val(context)));
@@ -1090,11 +1264,20 @@ bs_get_tail(Src, Dst, Live) {
%if ARCH_64
-i_bs_start_match3_gp(Src, Live, Fail, Dst, Pos) {
- Eterm context, header;
- Uint position, live;
+i_bs_start_match3_gp := i_bs_start_match3_gp.fetch.execute;
+i_bs_start_match3_gp.head() {
+ Eterm context;
+}
+
+i_bs_start_match3_gp.fetch(Src) {
context = $Src;
+}
+
+i_bs_start_match3_gp.execute(Live, Fail, Dst, Pos) {
+ Eterm header;
+ Uint position, live;
+
live = $Live;
if (!is_boxed(context)) {
@@ -1139,11 +1322,20 @@ i_bs_start_match3_gp(Src, Live, Fail, Dst, Pos) {
$Pos = make_small(position);
}
-i_bs_start_match3(Src, Live, Fail, Dst) {
- Eterm context, header;
- Uint live;
+i_bs_start_match3 := i_bs_start_match3.fetch.execute;
+
+i_bs_start_match3.head() {
+ Eterm context;
+}
+i_bs_start_match3.fetch(Src) {
context = $Src;
+}
+
+i_bs_start_match3.execute(Live, Fail, Dst) {
+ Eterm header;
+ Uint live;
+
live = $Live;
if (!is_boxed(context)) {
@@ -1213,12 +1405,19 @@ i_bs_get_position(Ctx, Dst) {
# match at a position beyond 16MB.
#
-bs_set_position(Ctx, Pos) {
+bs_set_position := bs_set_position.fetch.execute;
+
+bs_set_position.head() {
Eterm context, position;
- ErlBinMatchState *ms;
+}
+bs_set_position.fetch(Ctx, Pos) {
context = $Ctx;
position = $Pos;
+}
+
+bs_set_position.execute() {
+ ErlBinMatchState *ms;
ASSERT(header_is_bin_matchstate(*boxed_val(context)));
ms = (ErlBinMatchState*)boxed_val(context);
@@ -1231,12 +1430,19 @@ bs_set_position(Ctx, Pos) {
}
}
-bs_get_position(Ctx, Dst, Live) {
- ErlBinMatchState *ms;
+bs_get_position := bs_get_position.fetch.execute;
+
+bs_get_position.head() {
Eterm context;
- Uint position;
+}
+bs_get_position.fetch(Ctx) {
context = $Ctx;
+}
+
+bs_get_position.execute(Dst, Live) {
+ ErlBinMatchState *ms;
+ Uint position;
ASSERT(header_is_bin_matchstate(*boxed_val(context)));
ms = (ErlBinMatchState*)boxed_val(context);
@@ -1261,11 +1467,20 @@ bs_get_position(Ctx, Dst, Live) {
}
}
-i_bs_start_match3(Src, Live, Fail, Dst) {
- Eterm context, header;
- Uint live;
+i_bs_start_match3 := i_bs_start_match3.fetch.execute;
+
+i_bs_start_match3.head() {
+ Eterm context;
+}
+i_bs_start_match3.fetch(Src) {
context = $Src;
+}
+
+i_bs_start_match3.execute(Live, Fail, Dst) {
+ Eterm header;
+ Uint live;
+
live = $Live;
if (!is_boxed(context)) {
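When the overflow-checking builtins are not available, the new 64-bit BS_SAFE_MUL earlier in this file's diff falls back to the classic divide-and-check test. A self-contained sketch of that fallback (names are illustrative, not the emulator's; a zero guard is added here because, unlike the emulator's unit operand, the second factor is not known to be non-zero):

    #include <stdint.h>
    #include <stdio.h>

    /* Unsigned multiply with overflow detection and no compiler support:
     * the product wraps modulo 2^64, so if it overflowed, dividing it by
     * one (non-zero) factor can no longer recover the other factor. */
    static int safe_mul_u64(uint64_t a, uint64_t b, uint64_t *out)
    {
        uint64_t res = a * b;
        if (b != 0 && res / b != a)
            return 0;                   /* overflow */
        *out = res;
        return 1;
    }

    int main(void)
    {
        uint64_t r;
        printf("%d\n", safe_mul_u64(1ULL << 20, 1ULL << 20, &r));  /* 1 */
        printf("%d\n", safe_mul_u64(UINT64_MAX, 2, &r));           /* 0 */
        return 0;
    }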
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 74708b2caa..8c51bdb630 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -4689,6 +4689,16 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_ok);
}
}
+ else if (ERTS_IS_ATOM_STR("mbuf", BIF_ARG_1)) {
+ Uint sz = size_object(BIF_ARG_2);
+ ErlHeapFragment* frag = new_message_buffer(sz);
+ Eterm *hp = frag->mem;
+ Eterm copy = copy_struct(BIF_ARG_2, sz, &hp, &frag->off_heap);
+ frag->next = BIF_P->mbuf;
+ BIF_P->mbuf = frag;
+ BIF_P->mbuf_sz += sz;
+ BIF_RET(copy);
+ }
}
BIF_ERROR(BIF_P, BADARG);
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index f05a3b51c9..426c7d2d48 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1323,11 +1323,7 @@ static int match_traverse(Process* p, DbTableHash* tb,
unlock_hash_function(lck);
break;
}
- if (iterations_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
+ if (iterations_left <= 0) {
unlock_hash_function(lck);
ret_value = ctx->on_trap(ctx, slot_ix, got, &mpi.mp, ret);
goto done;
@@ -1433,11 +1429,7 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
unlock_hash_function(lck);
break;
}
- if (iterations_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
+ if (iterations_left <= 0) {
unlock_hash_function(lck);
ret_value = ctx->on_trap(ctx, slot_ix, got, mpp, ret);
goto done;
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 02a5934a6e..fe57348700 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -3743,13 +3743,6 @@ static int doit_select(DbTableCommon *tb, TreeDbTerm *this,
if (is_value(ret)) {
sc->accum = CONS(hp, ret, sc->accum);
}
- if (MBUF(sc->p)) {
- /*
- * Force a trap and GC if a heap fragment was created. Many heap fragments
- * make the GC slow.
- */
- sc->max = 0;
- }
if (--(sc->max) <= 0) {
return 0;
}
@@ -3806,13 +3799,6 @@ static int doit_select_chunk(DbTableCommon *tb, TreeDbTerm *this,
++(sc->got);
sc->accum = CONS(hp, ret, sc->accum);
}
- if (MBUF(sc->p)) {
- /*
- * Force a trap and GC if a heap fragment was created. Many heap fragments
- * make the GC slow.
- */
- sc->max = 0;
- }
if (--(sc->max) <= 0 || sc->got == sc->chunk_size) {
return 0;
}
diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab
index fc88cab22f..1eb83b61f2 100644
--- a/erts/emulator/beam/instrs.tab
+++ b/erts/emulator/beam/instrs.tab
@@ -512,6 +512,15 @@ move_shift(Src, SD, D) {
$SD = V;
}
+move_window2(S1, S2, D) {
+ Eterm xt0, xt1;
+ Eterm* y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ y[0] = xt0;
+ y[1] = xt1;
+}
+
move_window3(S1, S2, S3, D) {
Eterm xt0, xt1, xt2;
Eterm* y = &$D;
@@ -875,12 +884,16 @@ i_is_ne_exact_literal(Fail, Src, Literal) {
}
}
-is_eq(Fail, X, Y) {
- CMP_EQ_ACTION($X, $Y, $FAIL($Fail));
+is_eq(Fail, A, B) {
+ Eterm a = $A;
+ Eterm b = $B;
+ CMP_EQ_ACTION(a, b, $FAIL($Fail));
}
-is_ne(Fail, X, Y) {
- CMP_NE_ACTION($X, $Y, $FAIL($Fail));
+is_ne(Fail, A, B) {
+ Eterm a = $A;
+ Eterm b = $B;
+ CMP_NE_ACTION(a, b, $FAIL($Fail));
}
is_lt(Fail, X, Y) {
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 3cfc685336..e688c6996b 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -215,11 +215,7 @@ set_tuple_element s S P
# Get tuple element
-i_get_tuple_element xy P x
-
-%cold
-i_get_tuple_element xy P y
-%hot
+i_get_tuple_element xy P xy
i_get_tuple_element2 x P x
i_get_tuple_element2_dst x P x x
@@ -285,6 +281,9 @@ move_window/6
move X1=x Y1=y | move X2=x Y2=y | move X3=x Y3=y | succ(Y1,Y2) | succ(Y2,Y3) => \
move_window X1 X2 X3 Y1 Y3
+move X1=x Y1=y | move X2=x Y2=y | succ(Y1,Y2) => \
+ move_window2 X1 X2 Y1
+
move_window X1=x X2=x X3=x Y1=y Y3=y | move X4=x Y4=y | succ(Y3,Y4) => \
move_window X1 X2 X3 X4 Y1 Y4
@@ -294,6 +293,7 @@ move_window X1=x X2=x X3=x X4=x Y1=y Y4=y | move X5=x Y5=y | succ(Y4,Y5) => \
move_window X1=x X2=x X3=x Y1=y Y3=y => move_window3 X1 X2 X3 Y1
move_window X1=x X2=x X3=x X4=x Y1=y Y4=y => move_window4 X1 X2 X3 X4 Y1
+move_window2 x x y
move_window3 x x x y
move_window4 x x x x y
move_window5 x x x x x y
@@ -346,11 +346,6 @@ move X1=x X2=x | move X3=x X4=x | independent_moves(X1, X2, X3, X4) => \
move2_par X1 X2 X3 X4
move2_par x x x x
-# move2_par x y x y
-
-move X1=x Y1=y | move X2=x Y2=y => move2_par X1 Y1 X2 Y2
-move2_par x y x y
-
# move2_par x x x y
move X1=x X2=x | move X3=x Y1=y | independent_moves(X1, X2, X3, Y1) => \
@@ -393,20 +388,14 @@ move3 x x x x x x
move C=aiq X=x==1 => move_x1 C
move C=aiq X=x==2 => move_x2 C
+move n D=y => init D
+
move_x1 c
move_x2 c
-# The compiler almost never generates a "move Literal y(Y)" instruction,
-# so let's cheat if we encounter one.
-move S=n D=y => init D
-move S=c D=y => move S x | move x D
-
-move x x
-move x y
-move y x
-move c x
+move xy xy
+move c xy
move n x
-move y y
# The following move instructions using x(0) are frequently used.
@@ -500,9 +489,13 @@ is_ge f? c x
is_ge f? s s
%hot
-is_eq f? s s
+is_eq Fail=f Const=c Reg=xy => is_eq Fail Reg Const
+is_eq Fail=f C1=c C2=c => move C1 x | is_eq Fail x C2
+is_eq f? S s
-is_ne f? s s
+is_ne Fail=f Const=c Reg=xy => is_ne Fail Reg Const
+is_ne Fail=f C1=c C2=c => move C1 x | is_ne Fail x C2
+is_ne f? S s
#
# Putting tuples.
@@ -1035,6 +1028,9 @@ call_bif e
bif0 u$bif:erlang:self/0 Dst=d => self Dst
bif0 u$bif:erlang:node/0 Dst=d => node Dst
+bif1 Fail=f Bif=u$bif:erlang:hd/1 Src=x Dst=x => is_nonempty_list_get_hd Fail Src Dst
+bif1 Fail=f Bif=u$bif:erlang:tl/1 Src=x Dst=x => is_nonempty_list_get_tl Fail Src Dst
+
bif1 Fail Bif=u$bif:erlang:get/1 Src=s Dst=d => gen_get(Src, Dst)
bif2 Jump=j u$bif:erlang:element/2 S1=s S2=xy Dst=d => gen_element(Jump, S1, S2, Dst)
@@ -1123,19 +1119,33 @@ is_function Fail=f c => jump Fail
func_info M F A => i_func_info u M F A
# ================================================================
-# New bit syntax matching (R11B).
+# Bit syntax matching obsoleted in OTP 22.
# ================================================================
-%warm
+%cold
bs_start_match2 Fail=f ica X Y D => jump Fail
bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D
i_bs_start_match2 xy f t t d
+bs_save2 Y=y Index => move Y x | bs_save2 x Index
bs_save2 Reg Index => gen_bs_save(Reg, Index)
-i_bs_save2 xy t
+i_bs_save2 x t
+bs_restore2 Y=y Index => move Y x | bs_restore2 x Index
bs_restore2 Reg Index => gen_bs_restore(Reg, Index)
-i_bs_restore2 xy t
+i_bs_restore2 x t
+
+bs_context_to_binary Y=y | line L | badmatch Y => \
+ move Y x | bs_context_to_binary x | line L | badmatch x
+bs_context_to_binary Y=y => move Y x | bs_context_to_binary x
+bs_context_to_binary x
+%warm
+
+# ================================================================
+# New bit syntax matching (R11B).
+# ================================================================
+
+%warm
# Matching integers
bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val
@@ -1154,7 +1164,7 @@ i_bs_get_integer_imm Ms Bits Live Fail Flags Y=y => \
i_bs_get_integer_small_imm xy W f? t x
i_bs_get_integer_imm xy W t f? t x
-i_bs_get_integer f? t t xy s d
+i_bs_get_integer xy f? t t s d
i_bs_get_integer_8 xy f? d
i_bs_get_integer_16 xy f? d
@@ -1166,9 +1176,9 @@ i_bs_get_integer_32 xy f? d
bs_get_binary2 Fail=f Ms=xy Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-i_bs_get_binary_imm2 f? xy t W t d
-i_bs_get_binary2 f xy t? s t d
-i_bs_get_binary_all2 f? xy t t d
+i_bs_get_binary_imm2 xy f? t W t d
+i_bs_get_binary2 xy f t? s t d
+i_bs_get_binary_all2 xy f? t t d
i_bs_get_binary_all_reuse xy f? t
# Fetching float from binaries.
@@ -1177,7 +1187,7 @@ bs_get_float2 Fail=f Ms=xy Live=u Sz=s Unit=u Flags=u Dst=d => \
bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
-i_bs_get_float2 f? xy t s t d
+i_bs_get_float2 xy f? t s t d
# Miscellanous
@@ -1185,8 +1195,7 @@ bs_skip_bits2 Fail=f Ms=xy Sz=sq Unit=u Flags=u => \
gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
i_bs_skip_bits_imm2 f? xy W
-i_bs_skip_bits2 f? xy xy t
-i_bs_skip_bits_all2 f? xy t
+i_bs_skip_bits2 xy xy f? t
bs_test_tail2 Fail=f Ms=xy Bits=u==0 => bs_test_zero_tail2 Fail Ms
bs_test_tail2 Fail=f Ms=xy Bits=u => bs_test_tail_imm2 Fail Ms Bits
@@ -1197,16 +1206,6 @@ bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms
bs_test_unit f? xy t
bs_test_unit8 f? xy
-# An y register operand for bs_context_to_binary is rare,
-# but can happen because of inlining.
-
-bs_context_to_binary Y=y | line L | badmatch Y => \
- move Y x | bs_context_to_binary x | line L | badmatch x
-
-bs_context_to_binary Y=y => move Y x | bs_context_to_binary x
-
-bs_context_to_binary x
-
# Gets a bitstring from the tail of a context.
bs_get_tail xy d t
@@ -1332,31 +1331,35 @@ i_bs_private_append j? t s S x
bs_put_integer Fail=j Sz=sq Unit=u Flags=u Src=s => \
gen_put_integer(Fail, Sz, Unit, Flags, Src)
-i_new_bs_put_integer j? s t s
-i_new_bs_put_integer_imm j? W t s
+i_new_bs_put_integer j? S t s
+i_new_bs_put_integer_imm xyc j? W t
#
# Utf8/utf16/utf32 support. (R12B-5)
#
-bs_utf8_size j Src=s Dst=d => i_bs_utf8_size Src Dst
-
-i_bs_utf8_size s x
-
-bs_utf16_size j Src=s Dst=d => i_bs_utf16_size Src Dst
+bs_utf8_size j Src Dst=d => i_bs_utf8_size Src Dst
+bs_utf16_size j Src Dst=d => i_bs_utf16_size Src Dst
-i_bs_utf16_size s x
+bs_put_utf8 Fail u Src => i_bs_put_utf8 Fail Src
-bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src
+bs_put_utf32 Fail=j Flags=u Src=s => \
+ i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src
-i_bs_put_utf8 j? s
+i_bs_utf8_size S x
+i_bs_utf16_size S x
-bs_put_utf16 j? t s
+i_bs_put_utf8 j? S
+bs_put_utf16 j? t S
-bs_put_utf32 Fail=j Flags=u Src=s => \
- i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src
+i_bs_validate_unicode j? S
-i_bs_validate_unicode j? s
+# Handle unoptimized code.
+i_bs_utf8_size Src=c Dst => move Src x | i_bs_utf8_size x Dst
+i_bs_utf16_size Src=c Dst => move Src x | i_bs_utf16_size x Dst
+i_bs_put_utf8 Fail Src=c => move Src x | i_bs_put_utf8 Fail x
+bs_put_utf16 Fail Flags Src=c => move Src x | bs_put_utf16 Fail Flags x
+i_bs_validate_unicode Fail Src=c => move Src x | i_bs_validate_unicode Fail x
#
# Storing floats into binaries.
@@ -1366,7 +1369,7 @@ bs_put_float Fail Sz=q Unit Flags Val => badarg Fail
bs_put_float Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_float(Fail, Sz, Unit, Flags, Src)
-i_new_bs_put_float j? s t s
+i_new_bs_put_float j? S t s
i_new_bs_put_float_imm j? W t s
#
@@ -1376,9 +1379,18 @@ i_new_bs_put_float_imm j? W t s
bs_put_binary Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_binary(Fail, Sz, Unit, Flags, Src)
-i_new_bs_put_binary j? s t s
-i_new_bs_put_binary_imm j? W s
-i_new_bs_put_binary_all j? s t
+# In unoptimized code, the binary argument could be a literal. (In optimized code,
+# there would be a bs_put_string instruction.)
+i_new_bs_put_binary Fail Size Unit Lit=c => \
+ move Lit x | i_new_bs_put_binary Fail Size Unit x
+i_new_bs_put_binary_imm Fail Size Lit=c => \
+ move Lit x | i_new_bs_put_binary_imm Fail Size x
+i_new_bs_put_binary_all Lit=c Fail Unit => \
+ move Lit x | i_new_bs_put_binary_all x Fail Unit
+
+i_new_bs_put_binary j? S t S
+i_new_bs_put_binary_imm j? W S
+i_new_bs_put_binary_all xy j? t
#
# Warning: The i_bs_put_string and i_new_bs_put_string instructions
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index a69da4d762..a6312293cc 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -111,6 +111,23 @@
#endif
#endif
+/*
+ * Test for clang's convenient __has_builtin feature checking macro.
+ */
+#ifndef __has_builtin
+ #define __has_builtin(x) 0
+#endif
+
+/*
+ * Define HAVE_OVERFLOW_CHECK_BUILTINS if the overflow checking arithmetic
+ * builtins are available.
+ */
+#if ERTS_AT_LEAST_GCC_VSN__(5, 1, 0)
+# define HAVE_OVERFLOW_CHECK_BUILTINS 1
+#elif __has_builtin(__builtin_mul_overflow)
+# define HAVE_OVERFLOW_CHECK_BUILTINS 1
+#endif
+
#include "erl_misc_utils.h"
/*
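The detection added to sys.h can be exercised on its own; a minimal sketch, assuming only that GCC 5+ and clang provide __builtin_mul_overflow (the ERTS_AT_LEAST_GCC_VSN__ probe is simplified to a bare __GNUC__ check here):

    #include <stdio.h>

    /* Compilers without clang's __has_builtin get a harmless 0-expanding
     * stub so the #if below remains a valid preprocessor expression. */
    #ifndef __has_builtin
    #  define __has_builtin(x) 0
    #endif

    #if (defined(__GNUC__) && __GNUC__ >= 5) || __has_builtin(__builtin_mul_overflow)
    #  define HAVE_OVERFLOW_CHECK_BUILTINS 1
    #endif

    int main(void)
    {
    #ifdef HAVE_OVERFLOW_CHECK_BUILTINS
        long long res;
        int of = __builtin_mul_overflow(1LL << 40, 1LL << 40, &res);
        printf("overflow builtins available, overflow=%d\n", of);  /* overflow=1 */
    #else
        printf("no overflow-checking builtins; use a portable fallback\n");
    #endif
        return 0;
    }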
diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops
index f73e2362bf..1625b2cc65 100755
--- a/erts/emulator/utils/beam_makeops
+++ b/erts/emulator/utils/beam_makeops
@@ -1488,7 +1488,7 @@ sub code_gen {
$var_decls .= "Eterm $tmp;\n";
$tmp_arg_num++;
push(@f, $tmp);
- $prefix .= "GetR($arg_offset, $tmp);\n";
+ $prefix .= "GetSource(" . arg_offset($arg_offset) . ", $tmp);\n";
$need_block = 1;
last SWITCH;
};
diff --git a/lib/crypto/c_src/atoms.c b/lib/crypto/c_src/atoms.c
index 2e417da7f4..798c26c9bb 100644
--- a/lib/crypto/c_src/atoms.c
+++ b/lib/crypto/c_src/atoms.c
@@ -41,6 +41,18 @@ ERL_NIF_TERM atom_not_enabled;
ERL_NIF_TERM atom_not_supported;
#endif
+ERL_NIF_TERM atom_type;
+ERL_NIF_TERM atom_size;
+ERL_NIF_TERM atom_block_size;
+ERL_NIF_TERM atom_key_length;
+ERL_NIF_TERM atom_iv_length;
+ERL_NIF_TERM atom_mode;
+ERL_NIF_TERM atom_ecb_mode;
+ERL_NIF_TERM atom_cbc_mode;
+ERL_NIF_TERM atom_cfb_mode;
+ERL_NIF_TERM atom_ofb_mode;
+ERL_NIF_TERM atom_stream_cipher;
+
#if defined(HAVE_EC)
ERL_NIF_TERM atom_prime_field;
ERL_NIF_TERM atom_characteristic_two_field;
@@ -140,6 +152,18 @@ int init_atoms(ErlNifEnv *env, const ERL_NIF_TERM fips_mode, const ERL_NIF_TERM
atom_notsup = enif_make_atom(env,"notsup");
atom_digest = enif_make_atom(env,"digest");
+ atom_type = enif_make_atom(env,"type");
+ atom_size = enif_make_atom(env,"size");
+ atom_block_size = enif_make_atom(env,"block_size");
+ atom_key_length = enif_make_atom(env,"key_length");
+ atom_iv_length = enif_make_atom(env,"iv_length");
+ atom_mode = enif_make_atom(env,"mode");
+ atom_ecb_mode = enif_make_atom(env,"ecb_mode");
+ atom_cbc_mode = enif_make_atom(env,"cbc_mode");
+ atom_cfb_mode = enif_make_atom(env,"cfb_mode");
+ atom_ofb_mode = enif_make_atom(env,"ofb_mode");
+ atom_stream_cipher = enif_make_atom(env,"stream_cipher");
+
#if defined(HAVE_EC)
atom_prime_field = enif_make_atom(env,"prime_field");
atom_characteristic_two_field = enif_make_atom(env,"characteristic_two_field");
diff --git a/lib/crypto/c_src/atoms.h b/lib/crypto/c_src/atoms.h
index f15523d865..f8e9211459 100644
--- a/lib/crypto/c_src/atoms.h
+++ b/lib/crypto/c_src/atoms.h
@@ -45,6 +45,18 @@ extern ERL_NIF_TERM atom_not_enabled;
extern ERL_NIF_TERM atom_not_supported;
#endif
+extern ERL_NIF_TERM atom_type;
+extern ERL_NIF_TERM atom_size;
+extern ERL_NIF_TERM atom_block_size;
+extern ERL_NIF_TERM atom_key_length;
+extern ERL_NIF_TERM atom_iv_length;
+extern ERL_NIF_TERM atom_mode;
+extern ERL_NIF_TERM atom_ecb_mode;
+extern ERL_NIF_TERM atom_cbc_mode;
+extern ERL_NIF_TERM atom_cfb_mode;
+extern ERL_NIF_TERM atom_ofb_mode;
+extern ERL_NIF_TERM atom_stream_cipher;
+
#if defined(HAVE_EC)
extern ERL_NIF_TERM atom_prime_field;
extern ERL_NIF_TERM atom_characteristic_two_field;
diff --git a/lib/crypto/c_src/cipher.c b/lib/crypto/c_src/cipher.c
index f8e44b228a..c055a62654 100644
--- a/lib/crypto/c_src/cipher.c
+++ b/lib/crypto/c_src/cipher.c
@@ -67,14 +67,26 @@ static struct cipher_type_t cipher_types[] =
{{"aes_cfb8"}, {&EVP_aes_192_cfb8}, 24, NO_FIPS_CIPHER | AES_CFBx},
{{"aes_cfb8"}, {&EVP_aes_256_cfb8}, 32, NO_FIPS_CIPHER | AES_CFBx},
+ {{"aes_128_cfb8"}, {&EVP_aes_128_cfb8}, 16, NO_FIPS_CIPHER | AES_CFBx},
+ {{"aes_192_cfb8"}, {&EVP_aes_192_cfb8}, 24, NO_FIPS_CIPHER | AES_CFBx},
+ {{"aes_256_cfb8"}, {&EVP_aes_256_cfb8}, 32, NO_FIPS_CIPHER | AES_CFBx},
+
{{"aes_cfb128"}, {&EVP_aes_128_cfb128}, 16, NO_FIPS_CIPHER | AES_CFBx},
{{"aes_cfb128"}, {&EVP_aes_192_cfb128}, 24, NO_FIPS_CIPHER | AES_CFBx},
{{"aes_cfb128"}, {&EVP_aes_256_cfb128}, 32, NO_FIPS_CIPHER | AES_CFBx},
+ {{"aes_128_cfb128"}, {&EVP_aes_128_cfb128}, 16, NO_FIPS_CIPHER | AES_CFBx},
+ {{"aes_192_cfb128"}, {&EVP_aes_192_cfb128}, 24, NO_FIPS_CIPHER | AES_CFBx},
+ {{"aes_256_cfb128"}, {&EVP_aes_256_cfb128}, 32, NO_FIPS_CIPHER | AES_CFBx},
+
{{"aes_ecb"}, {&EVP_aes_128_ecb}, 16, ECB_BUG_0_9_8L},
{{"aes_ecb"}, {&EVP_aes_192_ecb}, 24, ECB_BUG_0_9_8L},
{{"aes_ecb"}, {&EVP_aes_256_ecb}, 32, ECB_BUG_0_9_8L},
+ {{"aes_128_ecb"}, {&EVP_aes_128_ecb}, 16, ECB_BUG_0_9_8L},
+ {{"aes_192_ecb"}, {&EVP_aes_192_ecb}, 24, ECB_BUG_0_9_8L},
+ {{"aes_256_ecb"}, {&EVP_aes_256_ecb}, 32, ECB_BUG_0_9_8L},
+
#if defined(HAVE_EVP_AES_CTR)
{{"aes_128_ctr"}, {&EVP_aes_128_ctr}, 16, 0},
{{"aes_192_ctr"}, {&EVP_aes_192_ctr}, 24, 0},
@@ -207,6 +219,87 @@ int cmp_cipher_types(const void *keyp, const void *elemp) {
}
+ERL_NIF_TERM cipher_info_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{/* (Type) */
+ const struct cipher_type_t *cipherp;
+ const EVP_CIPHER *cipher;
+ ERL_NIF_TERM ret, ret_mode;
+ unsigned type;
+ unsigned long mode;
+
+ if ((cipherp = get_cipher_type_no_key(argv[0])) == NULL)
+ return enif_make_badarg(env);
+
+ if (FORBIDDEN_IN_FIPS(cipherp))
+ return enif_raise_exception(env, atom_notsup);
+ if ((cipher = cipherp->cipher.p) == NULL)
+ return enif_raise_exception(env, atom_notsup);
+
+ ret = enif_make_new_map(env);
+
+ type = EVP_CIPHER_type(cipher);
+ enif_make_map_put(env, ret, atom_type,
+ type == NID_undef ? atom_undefined : enif_make_int(env, type),
+ &ret);
+
+ enif_make_map_put(env, ret, atom_key_length,
+ enif_make_int(env, EVP_CIPHER_key_length(cipher)), &ret);
+ enif_make_map_put(env, ret, atom_iv_length,
+ enif_make_int(env, EVP_CIPHER_iv_length(cipher)), &ret);
+ enif_make_map_put(env, ret, atom_block_size,
+ enif_make_int(env, EVP_CIPHER_block_size(cipher)), &ret);
+
+ mode = EVP_CIPHER_mode(cipher);
+ switch (mode) {
+ case EVP_CIPH_ECB_MODE:
+ ret_mode = atom_ecb_mode;
+ break;
+
+ case EVP_CIPH_CBC_MODE:
+ ret_mode = atom_cbc_mode;
+ break;
+
+ case EVP_CIPH_CFB_MODE:
+ ret_mode = atom_cfb_mode;
+ break;
+
+ case EVP_CIPH_OFB_MODE:
+ ret_mode = atom_ofb_mode;
+ break;
+
+ case EVP_CIPH_STREAM_CIPHER:
+ ret_mode = atom_stream_cipher;
+ break;
+
+ default:
+ ret_mode = atom_undefined;
+ break;
+ }
+
+ enif_make_map_put(env, ret, atom_mode, ret_mode, &ret);
+
+ return ret;
+}
+
+const struct cipher_type_t* get_cipher_type_no_key(ERL_NIF_TERM type)
+{
+ struct cipher_type_t key;
+
+ key.type.atom = type;
+
+ return bsearch(&key, cipher_types, num_cipher_types, sizeof(cipher_types[0]), cmp_cipher_types_no_key);
+}
+
+int cmp_cipher_types_no_key(const void *keyp, const void *elemp) {
+ const struct cipher_type_t *key = keyp;
+ const struct cipher_type_t *elem = elemp;
+
+ if (key->type.atom < elem->type.atom) return -1;
+ else if (key->type.atom > elem->type.atom) return 1;
+ else /* key->type.atom == elem->type.atom */ return 0;
+}
+
+
ERL_NIF_TERM cipher_types_as_list(ErlNifEnv* env)
{
struct cipher_type_t* p;
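Both cipher_info_nif above and hash_info_nif build their result with the same NIF map idiom: start from enif_make_new_map and thread the map through enif_make_map_put. A stripped-down sketch of that idiom (placeholder values rather than OpenSSL queries; it compiles against erl_nif.h but needs a NIF module around it to run):

    #include <erl_nif.h>

    /* Build #{key_length => ..., iv_length => ..., block_size => ...};
     * enif_make_map_put returns the extended map through its last argument,
     * so the same variable can be threaded through successive calls. */
    static ERL_NIF_TERM make_info_map(ErlNifEnv *env,
                                      int key_len, int iv_len, int block_size)
    {
        ERL_NIF_TERM map = enif_make_new_map(env);

        enif_make_map_put(env, map, enif_make_atom(env, "key_length"),
                          enif_make_int(env, key_len), &map);
        enif_make_map_put(env, map, enif_make_atom(env, "iv_length"),
                          enif_make_int(env, iv_len), &map);
        enif_make_map_put(env, map, enif_make_atom(env, "block_size"),
                          enif_make_int(env, block_size), &map);

        return map;
    }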
diff --git a/lib/crypto/c_src/cipher.h b/lib/crypto/c_src/cipher.h
index 6b43afea99..b0d9d324e1 100644
--- a/lib/crypto/c_src/cipher.h
+++ b/lib/crypto/c_src/cipher.h
@@ -61,12 +61,16 @@ struct evp_cipher_ctx {
EVP_CIPHER_CTX* ctx;
};
+ERL_NIF_TERM cipher_info_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
int init_cipher_ctx(ErlNifEnv *env);
void init_cipher_types(ErlNifEnv* env);
+const struct cipher_type_t* get_cipher_type_no_key(ERL_NIF_TERM type);
const struct cipher_type_t* get_cipher_type(ERL_NIF_TERM type, size_t key_len);
int cmp_cipher_types(const void *keyp, const void *elemp);
+int cmp_cipher_types_no_key(const void *keyp, const void *elemp);
ERL_NIF_TERM cipher_types_as_list(ErlNifEnv* env);
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index 06439c34b2..261590d9a5 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -67,6 +67,7 @@ static ErlNifFunc nif_funcs[] = {
{"info_fips", 0, info_fips, 0},
{"enable_fips_mode", 1, enable_fips_mode, 0},
{"algorithms", 0, algorithms, 0},
+ {"hash_info", 1, hash_info_nif, 0},
{"hash_nif", 2, hash_nif, 0},
{"hash_init_nif", 1, hash_init_nif, 0},
{"hash_update_nif", 2, hash_update_nif, 0},
@@ -78,6 +79,7 @@ static ErlNifFunc nif_funcs[] = {
{"hmac_final_nif", 1, hmac_final_nif, 0},
{"hmac_final_nif", 2, hmac_final_nif, 0},
{"cmac_nif", 3, cmac_nif, 0},
+ {"cipher_info_nif", 1, cipher_info_nif, 0},
{"block_crypt_nif", 5, block_crypt_nif, 0},
{"block_crypt_nif", 4, block_crypt_nif, 0},
{"aes_ige_crypt_nif", 4, aes_ige_crypt_nif, 0},
diff --git a/lib/crypto/c_src/hash.c b/lib/crypto/c_src/hash.c
index 457e9d071a..0a9f64acef 100644
--- a/lib/crypto/c_src/hash.c
+++ b/lib/crypto/c_src/hash.c
@@ -61,6 +61,32 @@ int init_hash_ctx(ErlNifEnv* env) {
#endif
}
+ERL_NIF_TERM hash_info_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{/* (Type) */
+ struct digest_type_t *digp = NULL;
+ const EVP_MD *md;
+ ERL_NIF_TERM ret;
+
+ ASSERT(argc == 1);
+
+ if ((digp = get_digest_type(argv[0])) == NULL)
+ return enif_make_badarg(env);
+
+ if ((md = digp->md.p) == NULL)
+ return atom_notsup;
+
+ ret = enif_make_new_map(env);
+
+ enif_make_map_put(env, ret, atom_type,
+ enif_make_int(env, EVP_MD_type(md)), &ret);
+ enif_make_map_put(env, ret, atom_size,
+ enif_make_int(env, EVP_MD_size(md)), &ret);
+ enif_make_map_put(env, ret, atom_block_size,
+ enif_make_int(env, EVP_MD_block_size(md)), &ret);
+
+ return ret;
+}
+
ERL_NIF_TERM hash_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Type, Data) */
struct digest_type_t *digp = NULL;
diff --git a/lib/crypto/c_src/hash.h b/lib/crypto/c_src/hash.h
index 8bae07f39a..92a25cedb7 100644
--- a/lib/crypto/c_src/hash.h
+++ b/lib/crypto/c_src/hash.h
@@ -25,6 +25,7 @@
int init_hash_ctx(ErlNifEnv *env);
+ERL_NIF_TERM hash_info_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
ERL_NIF_TERM hash_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
ERL_NIF_TERM hash_init_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
ERL_NIF_TERM hash_update_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index fe8390c5b8..97a4a7a3f0 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -24,6 +24,7 @@
-export([start/0, stop/0, info_lib/0, info_fips/0, supports/0, enable_fips_mode/1,
version/0, bytes_to_integer/1]).
+-export([cipher_info/1, hash_info/1]).
-export([hash/2, hash_init/1, hash_update/2, hash_final/1]).
-export([sign/4, sign/5, verify/5, verify/6]).
-export([generate_key/2, generate_key/3, compute_key/4]).
@@ -403,6 +404,11 @@ enable_fips_mode(_) -> ?nif_stub.
-define(HASH_HASH_ALGORITHM, sha1() | sha2() | sha3() | blake2() | ripemd160 | compatibility_only_hash() ).
+-spec hash_info(Type) -> map() when Type :: ?HASH_HASH_ALGORITHM.
+
+hash_info(Type) ->
+ notsup_to_error(hash_info_nif(Type)).
+
-spec hash(Type, Data) -> Digest when Type :: ?HASH_HASH_ALGORITHM,
Data :: iodata(),
Digest :: binary().
@@ -531,6 +537,12 @@ poly1305(Key, Data) ->
%%%
%%%================================================================
+-spec cipher_info(Type) -> map() when Type :: block_cipher_with_iv()
+ | aead_cipher()
+ | block_cipher_without_iv().
+cipher_info(Type) ->
+ cipher_info_nif(Type).
+
%%%---- Block ciphers
%%%----------------------------------------------------------------
@@ -1726,6 +1738,7 @@ hash_update(State0, Data, _, MaxBytes) ->
State = notsup_to_error(hash_update_nif(State0, Increment)),
hash_update(State, Rest, erlang:byte_size(Rest), MaxBytes).
+hash_info_nif(_Hash) -> ?nif_stub.
hash_nif(_Hash, _Data) -> ?nif_stub.
hash_init_nif(_Hash) -> ?nif_stub.
hash_update_nif(_State, _Data) -> ?nif_stub.
@@ -1770,6 +1783,8 @@ poly1305_nif(_Key, _Data) -> ?nif_stub.
%% CIPHERS --------------------------------------------------------------------
+cipher_info_nif(_Type) -> ?nif_stub.
+
block_crypt_nif(_Type, _Key, _Ivec, _Text, _IsEncrypt) -> ?nif_stub.
block_crypt_nif(_Type, _Key, _Text, _IsEncrypt) -> ?nif_stub.
diff --git a/lib/crypto/test/crypto_SUITE.erl b/lib/crypto/test/crypto_SUITE.erl
index ab6d88deb2..7257f4fb9f 100644
--- a/lib/crypto/test/crypto_SUITE.erl
+++ b/lib/crypto/test/crypto_SUITE.erl
@@ -40,7 +40,9 @@ all() ->
rand_uniform,
rand_threads,
rand_plugin,
- rand_plugin_s
+ rand_plugin_s,
+ cipher_info,
+ hash_info
].
groups() ->
@@ -666,6 +668,23 @@ rand_plugin_s(Config) when is_list(Config) ->
rand_plugin_aux(explicit_state).
%%--------------------------------------------------------------------
+cipher_info() ->
+ [{doc, "crypto cipher_info testing"}].
+cipher_info(Config) when is_list(Config) ->
+ #{type := _,key_length := _,iv_length := _,
+ block_size := _,mode := _} = crypto:cipher_info(aes_128_cbc),
+ {'EXIT',_} = (catch crypto:cipher_info(not_a_cipher)),
+ ok.
+
+%%--------------------------------------------------------------------
+hash_info() ->
+ [{doc, "crypto hash_info testing"}].
+hash_info(Config) when is_list(Config) ->
+ #{type := _,size := _,block_size := _} = crypto:hash_info(sha256),
+ {'EXIT',_} = (catch crypto:hash_info(not_a_hash)),
+ ok.
+
+%%--------------------------------------------------------------------
%% Internal functions ------------------------------------------------
%%--------------------------------------------------------------------
hash(_, [], []) ->
diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml
index 0a0194af2d..85522c99b2 100644
--- a/lib/diameter/doc/src/diameter.xml
+++ b/lib/diameter/doc/src/diameter.xml
@@ -1,7 +1,9 @@
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE erlref SYSTEM "erlref.dtd" [
- <!ENTITY spawn_opt
+ <!ENTITY spawn_opt2
'<seealso marker="erts:erlang#spawn_opt-2">erlang:spawn_opt/2</seealso>'>
+ <!ENTITY spawn_opt5
+ '<seealso marker="erts:erlang#spawn_opt-5">erlang:spawn_opt/5</seealso>'>
<!ENTITY nodes
'<seealso marker="erts:erlang#nodes-0">erlang:nodes/0</seealso>'>
<!ENTITY make_ref
@@ -21,7 +23,7 @@
<copyright>
<year>2011</year>
-<year>2017</year>
+<year>2019</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -1384,12 +1386,22 @@ the same peer.</p>
</item>
<tag>
-<marker id="spawn_opt"/><c>{spawn_opt, [term()]}</c></tag>
+<marker id="spawn_opt"/><c>{spawn_opt, [term()] | {M,F,A}}</c></tag>
<item>
<p>
-Options passed to &spawn_opt; when spawning a process for an
-incoming Diameter request.
-Options <c>monitor</c> and <c>link</c> are ignored.</p>
+An options list passed to &spawn_opt2; to spawn a handler process for an
+incoming Diameter request on the local node, or an MFA that returns
+the pid of a handler process.</p>
+
+<p>
+Options <c>monitor</c> and <c>link</c> are ignored in the list-valued
+case.
+An MFA is applied with an additional term prepended to its argument
+list, and should return either the pid of the handler process that
+invokes <c>diameter_traffic:request/1</c> on the term in order to
+process the request, or the atom <c>discard</c>.
+The handler process need not be local, but diameter must be started on
+the remote node.</p>
<p>
Defaults to the empty list.</p>
diff --git a/lib/diameter/doc/src/diameter_transport.xml b/lib/diameter/doc/src/diameter_transport.xml
index 67fd54bc56..0a8ef321c6 100644
--- a/lib/diameter/doc/src/diameter_transport.xml
+++ b/lib/diameter/doc/src/diameter_transport.xml
@@ -14,7 +14,8 @@
<erlref>
<header>
<copyright>
-<year>2011</year><year>2016</year>
+<year>2011</year>
+<year>2019</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -174,10 +175,13 @@ its parent.</p>
<taglist>
-<tag><c>{diameter, {send, &message;}}</c></tag>
+<tag><c>{diameter, {send, &message; | false}}</c></tag>
<item>
<p>
-An outbound Diameter message.</p>
+An outbound Diameter message.
+The atom <c>false</c> can only be received when request
+acknowledgements have been requested: see the <c>ack</c> message
+below.</p>
</item>
<tag><c>{diameter, {close, Pid}}</c></tag>
@@ -246,6 +250,27 @@ A <c>LocalAddr</c> list has the same semantics as one returned from
&start;.</p>
</item>
+<tag><c>{diameter, ack}</c></tag>
+<item>
+<p>
+Request acknowledgements of unanswered requests.
+A transport process should send this once before passing incoming
+Diameter messages into diameter.
+As a result, every Diameter request passed into diameter with a
+<c>recv</c> message (below) will be answered with a
+<c>send</c> message (above), either a &message; for the transport
+process to send or the atom <c>false</c> if the request has been
+discarded or otherwise not answered.</p>
+
+<p>
+This is to allow a transport process to keep count of the number
+of incoming request messages that have not yet been answered or
+discarded, so that it can regulate the amount of incoming traffic.
+Both diameter_tcp and diameter_sctp request acknowledgements when a
+<c>message_cb</c> is configured, turning send/recv messages into
+callbacks that can be used to regulate traffic.</p>
+</item>
+
<tag><c>{diameter, {recv, &message;}}</c></tag>
<item>
<p>
diff --git a/lib/diameter/src/base/diameter.erl b/lib/diameter/src/base/diameter.erl
index b90b794611..7f172e1fa1 100644
--- a/lib/diameter/src/base/diameter.erl
+++ b/lib/diameter/src/base/diameter.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -365,7 +365,7 @@ call(SvcName, App, Message) ->
| {connect_timer, 'Unsigned32'()}
| {watchdog_timer, 'Unsigned32'() | {module(), atom(), list()}}
| {watchdog_config, [{okay|suspect, non_neg_integer()}]}
- | {spawn_opt, list()}.
+ | {spawn_opt, list() | mfa()}.
%% Options passed to start_service/2
diff --git a/lib/diameter/src/base/diameter_callback.erl b/lib/diameter/src/base/diameter_callback.erl
index d04a416bef..3bcf550cd8 100644
--- a/lib/diameter/src/base/diameter_callback.erl
+++ b/lib/diameter/src/base/diameter_callback.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -70,7 +70,7 @@
-module(diameter_callback).
-%% Default callbacks when no aleternate is specified.
+%% Default callbacks when no alternate is specified.
-export([peer_up/3,
peer_down/3,
pick_peer/4,
diff --git a/lib/diameter/src/base/diameter_dist.erl b/lib/diameter/src/base/diameter_dist.erl
new file mode 100644
index 0000000000..5c29ea95a4
--- /dev/null
+++ b/lib/diameter/src/base/diameter_dist.erl
@@ -0,0 +1,525 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2019. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(diameter_dist).
+
+-behaviour(gen_server).
+
+%%
+%% Implements callbacks that can be configured as a spawn_opt
+%% transport configuration, to be able to distribute incoming Diameter
+%% requests to handler processes (local or remote) in various ways.
+%%
+
+%% spawn_opt callbacks
+-export([spawn_local/2,
+ spawn_local/1,
+ route_session/2,
+ route_session/1]).
+
+%% signal availability for handling incoming requests to route_session/2
+-export([attach/1,
+ detach/1]).
+
+%% consistent hashing
+-export([hash/3, %% for use as default MFA in route_session/2 options map
+ hash/2]). %% arbitrary key/values
+
+-include_lib("diameter/include/diameter.hrl").
+
+%% server start
+-export([start_link/0]).
+
+%% gen_server callbacks
+-export([init/1,
+ handle_info/2,
+ handle_cast/2,
+ handle_call/3,
+ code_change/3,
+ terminate/2]).
+
+-type request() :: tuple(). %% callback argument from diameter_traffic
+
+-define(SERVER, ?MODULE). %% server monitoring node connections
+
+%% Maps a node name binary to the corresponding atom. Used by
+%% route_session/2 to map the optional value of a Session-Id to
+%% node().
+-define(NODE_TABLE, diameter_dist_node).
+
+%% Maps a diameter:service_name() to a node() that has called attach/1
+%% to declare its willingness to handle incoming requests for the
+%% service. Use by route_session/2 in case the optional value mapping
+%% has failed.
+-define(SERVICE_TABLE, diameter_dist_service).
+
+-define(B(A), atom_to_binary(A, utf8)).
+-define(ORCOND(List), list_to_tuple(['orelse', false | List])).
+-define(HASH(T), erlang:phash2(T, 16#100000000)).
+
+%% spawn_local/2
+%%
+%% Callback that is equivalent to an options list. That is, the
+%% following are equivalent when passed as options to
+%% diameter:add_transport/2.
+%%
+%% {spawn_opt, Opts}
+%% {spawn_opt, {diameter_dist, spawn_local, [Opts]}}
+
+-spec spawn_local(ReqT :: request(), Opts :: list())
+ -> pid().
+
+spawn_local(ReqT, Opts) ->
+ spawn_opt(diameter_traffic, request, [ReqT], Opts).
+
+%% spawn_local/1
+
+spawn_local(ReqT) ->
+ spawn_local(ReqT, []).
+
+%% route_session/2
+%%
+%% Callback that maps the Session-Id of an incoming request to a
+%% handler node.
+%%
+%% With an options list, maps an id whose optional value is the name
+%% of a connected node to the same node, to handle the case that the
+%% session id has been returned from diameter:session_id/1; otherwise
+%% to a node that has called diameter_dist:attach/1 using the
+%% consistent hashing provided by hash/3, or to the local node() if a
+%% session id could not be extracted or there are no attached nodes. A
+%% handler process is spawned on the selected node using
+%% erlang:spawn_opt/4.
+%%
+%% Different behaviour can be configured by supplying an options map
+%% of the following form:
+%%
+%% #{search => non_neg_integer(),
+%% id => [binary()],
+%% default => discard | local | mfa(),
+%% dispatch => list() | mfa()}
+%%
+%% The search member limits the number of AVPs that are examined in
+%% the message (from the front), to avoid searching the entire message
+%% in case it's known that peers follow RFC 6733's recommendation that
+%% Session-Id be placed at the head of a message. The default is to
+%% search the entire message.
+%%
+%% The id member restricts the optional value mapping to session ids
+%% whose DiameterIdentity is one of those specified. Set this to the
+%% list of Diameter identities advertised by the service in question
+%% (typically one) to ensure that only locally generated session ids
+%% are mapped; or to the empty list to disable the mapping.
+%%
+%% The default member determines where to handle a message whose
+%% Session-Id isn't found or whose optional value isn't mapped to the
+%% name of a connected node. The atom local selects the local node; an
+%% MFA is applied to Session-Id | false, the name of the diameter
+%% service, and the message binary, and should return either a node()
+%% or false to discard the message. Defaults to {diameter_dist, hash, []}.
+%%
+%% The dispatch member determines how the pid() of the request handler
+%% process is retrieved. An MFA is applied to the previously selected
+%% node() and to the module, function, and arguments list (supplied by
+%% diameter) that the handler process should apply in order to handle
+%% the request, and returns pid() | discard. A list is equivalent to
+%% {erlang, spawn_opt, []}. Defaults to [].
+%%
+%% This can be used with search = 0 to route on something other than
+%% Session-Id, but this is probably no simpler than just implementing
+%% your own spawn_opt callback. (Except possibly with the default dispatch.)
+%%
+%% Note that if the peer is also implemented with OTP diameter and
+%% generating session ids with diameter:session_id/1 then
+%% route_session/2 can map an optional value to a local node that
+%% happens to have the same name as one of the peer's nodes. This
+%% could lead to an uneven distribution; for example, if the peer
+%% nodes are a subset of the local nodes. In practice, it's typically
+%% known whether it is the peers or the local node that originates
+%% sessions; if the former, setting id = [] disables the optional value
+%% mapping, and if the latter, setting default = local disables the hashing.
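+%%
+%% An illustrative transport option (the search limit and the
+%% DiameterIdentity below are example values, not defaults):
+%%
+%%   {spawn_opt, {diameter_dist, route_session,
+%%                [#{search => 5,
+%%                   id => [<<"server.example.com">>],
+%%                   default => local}]}}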
+-spec route_session(ReqT :: request(), Opts)
+ -> discard
+ | pid()
+ when Opts :: pos_integer() %% aka #{search => N}
+ | list() %% aka #{dispatch => Opts}
+ | #{search => non_neg_integer(), %% limit number of examined AVPs
+ id => [binary()], %% restrict optional value map on DiamIdent
+ default => local %% handle locally
+ | discard
+ | mfa(), %% return node() | false
+ dispatch => list() %% spawn options
+ | mfa()}. %% (Node, M, F, A) -> pid() | discard
+
+route_session(ReqT, Opts) ->
+ {_, Bin} = Info = diameter_traffic:request_info(ReqT),
+ Sid = session_id(avps(Bin), search(Opts)),
+ Node = default(node_of_session_id(Sid, Opts), Sid, Opts, Info),
+ dispatch(Node, ReqT, dispatch(Opts)).
+
+%% avps/1
+
+avps(<<_:20/binary, Bin/binary>>) ->
+ Bin;
+
+avps(_) ->
+ false.
+
+%% dispatch/3
+
+dispatch(false, _, _) ->
+ discard;
+
+dispatch(Node, ReqT, {M,F,A}) ->
+ apply(M, F, [Node, diameter_traffic, request, [ReqT] | A]);
+
+dispatch(Node, ReqT, Opts) ->
+ spawn_opt(Node, diameter_traffic, request, [ReqT], Opts).
+
+%% route_session/1
+
+route_session(ReqT) ->
+ route_session(ReqT, []).
+
+%% node_of_session_id/2
+%%
+%% Return the node name encoded as the optional value in a Session-Id,
+%% assuming the id has been created with diameter:session_id/1. Look up
+%% the node name to ensure we don't convert arbitrary binaries to
+%% atoms.
+
+node_of_session_id([Id, _, _, Bin], #{id := Ids}) ->
+ lists:member(Id, Ids) andalso nodemap(Bin);
+
+node_of_session_id([_, _, _, Bin], _) ->
+ nodemap(Bin);
+
+node_of_session_id(_, _) ->
+ false.
+
+%% nodemap/1
+
+nodemap(Bin) ->
+ try
+ ets:lookup_element(?NODE_TABLE, Bin, 2)
+ catch
+ error: badarg -> false
+ end.
+
+%% session_id/2
+
+session_id(_, 0) -> %% give up
+ false;
+
+%% Session-Id = AVP Code 263, V-bit = 0.
+session_id(<<263:32, 0:1, _:7, Len:24, _/binary>> = Bin, _) ->
+ case Bin of
+ <<Avp:Len/binary, _/binary>> ->
+ <<_:8/binary, Sid/binary>> = Avp,
+ split(Sid);
+ _ ->
+ false
+ end;
+
+%% Jump to the next AVP. This is potentially costly for a message with
+%% many AVPs and no Session-Id, which an attacker is prone to send.
+%% 8.8 of RFC 6733 says that Session-Id SHOULD (but not MUST) appear
+%% immediately following the Diameter Header, so there is no
+%% guarantee.
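+%%
+%% An AVP starts with Code:32, Flags:8 (V-bit first) and Length:24,
+%% and its data is padded to a multiple of 4 octets; hence the 40-bit
+%% skip, the Len match, and the Pad calculation below.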
+session_id(<<_:40, Len:24, _/binary>> = Bin, N) ->
+ Pad = (4 - (Len rem 4)) rem 4,
+ case Bin of
+ <<_:Len/binary, _:Pad/binary, Rest/binary>> ->
+ session_id(Rest, if N == infinity -> N; true -> N-1 end);
+ _ ->
+ false
+ end;
+
+session_id(_, _) ->
+ false.
+
+%% split/1
+%%
+%% Split a Session-Id at no more than three semicolons: the optional
+%% value (if any) follows the third. binary:split/2 does better than
+%% matching character by character, especially when the pattern is
+%% compiled.
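+%%
+%% For example (illustrative values), a Session-Id of the form
+%% "srv.example.com;1234;5678;node@host" splits into
+%% [<<"srv.example.com">>,<<"1234">>,<<"5678">>,<<"node@host">>],
+%% the last element being the optional value.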
+
+split(Bin) ->
+ split(3, Bin, pattern()).
+
+%% split/3
+
+split(0, Bin, _) ->
+ [Bin];
+
+split(N, Bin, Pattern) ->
+ [H|T] = binary:split(Bin, Pattern),
+ [H | case T of
+ [] ->
+ T;
+ [Rest] ->
+ split(N-1, Rest, Pattern)
+ end].
+
+%% pattern/0
+%%
+%% Since this is being called in a watchdog process, compile the
+%% pattern once and maintain it in the process dictionary.
+
+pattern() ->
+ case get(?MODULE) of
+ undefined ->
+ CP = binary:compile_pattern(<<$;>>), %% tuple
+ put(?MODULE, CP),
+ CP;
+ CP ->
+ CP
+ end.
+
+%% dispatch/1
+
+dispatch(#{} = Opts) ->
+ maps:get(dispatch, Opts, []);
+
+dispatch(Opts)
+ when is_list(Opts) ->
+ Opts;
+
+dispatch(_) ->
+ [].
+
+%% search/1
+%%
+%% Bound the number of AVPs examined when looking for Session-Id.
+
+search(#{search := N})
+ when is_integer(N), 0 =< N ->
+ N;
+
+search(N)
+ when is_integer(N), 0 =< N ->
+ N;
+
+search(_) ->
+ infinity.
+
+%% default/4
+%%
+%% Choose a node when Session-Id lookup has failed.
+
+default(false, _, #{default := discard}, _) ->
+ false;
+
+default(false, _, #{default := local}, _) ->
+ node();
+
+default(false, Sid, #{default := {M,F,A}}, Info) ->
+ {ServiceName, Bin} = Info,
+ apply(M, F, [Sid, ServiceName, Bin | A]); %% node() | false
+
+default(false, Sid, _, Info) -> %% aka {?MODULE, hash, []}
+ {ServiceName, Bin} = Info,
+ hash(Sid, ServiceName, Bin);
+
+default(Node, _, _, _) ->
+ Node.
+
+%% ===========================================================================
+
+%% hash/3
+%%
+%% Consistent hashing of Session-Id to an attached node, or the local
+%% node if Session-Id = false or no attached nodes.
+
+hash(Sid, ServiceName, _) ->
+ case false /= Sid andalso attached(ServiceName) of
+ [_|_] = Nodes ->
+ hash(Sid, Nodes);
+ _ ->
+ node()
+ end.
+
+%% hash/2
+%%
+%% Consistent hashing on arbitrary key/values. Returns false if the
+%% list is empty.
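+%%
+%% For example (illustrative node names):
+%%
+%%   hash(SessionId, ['a@host', 'b@host', 'c@host'])
+%%
+%% deterministically returns one of the three nodes for a given
+%% SessionId.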
+
+%% No key or no values.
+hash(_, []) ->
+ false;
+
+%% Not much choice.
+hash(_, [Value]) ->
+ Value;
+
+%% Hash on a circle and choose the closest predecessor.
+hash(Key, Values) ->
+ Hash = ?HASH(Key),
+ tl(lists:foldl(fun(V,A) ->
+ choose(Hash, [?HASH({Key, V}) | V], A)
+ end,
+ false, %% < list()
+ Values)).
+
+%% choose/3
+
+choose(Hash, [Hash1 | _] = T, [Hash2 | _])
+ when Hash1 =< Hash, Hash < Hash2 ->
+ T;
+
+choose(Hash, [Hash1 | _], [Hash2 | _] = T)
+ when Hash2 =< Hash, Hash < Hash1 ->
+ T;
+
+choose(_, T1, T2) ->
+ max(T1, T2).
+
+%% ===========================================================================
+
+%% attach/1
+%%
+%% Register the local node as a handler of incoming requests for the
+%% specified services when using the route_session/2 spawn_opt
+%% callback.
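+%%
+%% For example (illustrative service name):
+%%
+%%   ok = diameter_dist:attach([my_service])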
+
+attach(ServiceNames) ->
+ abcast({attach, node(), ServiceNames}).
+
+%% detach/1
+%%
+%% Deregister the local node as a handler of incoming requests.
+
+detach(ServiceNames) ->
+ abcast({detach, node(), ServiceNames}).
+
+%% abcast/1
+
+abcast(T) ->
+ gen_server:abcast([node() | nodes()], ?SERVER, T),
+ ok.
+
+%% attached/1
+
+attached(ServiceName) ->
+ try
+ ets:lookup_element(?SERVICE_TABLE, ServiceName, 2)
+ catch
+ error: badarg -> []
+ end.
+
+%% cast/2
+
+cast(Node, T) ->
+ gen_server:cast({?SERVER, Node}, T).
+
+%% attach/2
+
+attach(Node, S) ->
+ case sets:to_list(S) of
+ [] ->
+ ok;
+ Services ->
+ cast(Node, {attach, node(), Services})
+ end.
+
+%% ===========================================================================
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, _Args = [], _Opts = []).
+
+%% init/1
+%%
+%% Maintain [node() | nodes()] in a table that maps from binary-valued
+%% names, so we can look up the corresponding atoms rather than convert
+%% binaries that aren't necessarily node names.
+
+init([]) ->
+ ets:new(?NODE_TABLE, [set, named_table]),
+ ets:new(?SERVICE_TABLE, [bag, named_table]),
+ ok = net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]),
+ ets:insert(?NODE_TABLE, [{?B(N), N} || N <- [node() | nodes()]]),
+ abcast({attach, node()}),
+ {ok, sets:new()}.
+
+%% handle_call/3
+
+handle_call(_, _From, S) ->
+ {reply, nok, S}.
+
+%% handle_cast/2
+
+%% Remote node is asking which services the local node wants to handle.
+handle_cast({attach, Node}, S)
+ when Node /= node() ->
+ attach(Node, S),
+ {noreply, S};
+
+%% Node wants to handle incoming requests ...
+handle_cast({attach, Node, ServiceNames}, S) ->
+ ets:insert(?SERVICE_TABLE, [{N, Node} || N <- ServiceNames]),
+ {noreply, case node() of
+ Node ->
+ sets:union(S, sets:from_list(ServiceNames));
+ _ ->
+ S
+ end};
+
+%% ... or not.
+handle_cast({detach, Node, ServiceNames}, S) ->
+ ets:select_delete(?SERVICE_TABLE, [{{'$1', Node},
+ [?ORCOND([{'==', '$1', {const, N}}
+ || N <- ServiceNames])],
+ [true]}]),
+ {noreply, case node() of
+ Node ->
+ sets:subtract(S, sets:from_list(ServiceNames));
+ _ ->
+ S
+ end};
+
+handle_cast(_, S) ->
+ {noreply, S}.
+
+%% handle_info/2
+
+handle_info({nodeup, Node, _}, S) ->
+ ets:insert(?NODE_TABLE, {?B(Node), Node}),
+    cast(Node, {attach, node()}), %% ask which services the remote node handles
+    attach(Node, S), %% say which services the local node handles
+ {noreply, S};
+
+handle_info({nodedown, Node, _}, S) ->
+ ets:delete(?NODE_TABLE, ?B(Node)),
+ ets:select_delete(?SERVICE_TABLE, [{{'_', Node}, [], [true]}]),
+ {noreply, S};
+
+handle_info(_, S) ->
+ {noreply, S}.
+
+%% terminate/2
+
+terminate(_, _) ->
+ ok.
+
+%% code_change/3
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/lib/diameter/src/base/diameter_misc_sup.erl b/lib/diameter/src/base/diameter_misc_sup.erl
index 343688be23..fec5a41b5c 100644
--- a/lib/diameter/src/base/diameter_misc_sup.erl
+++ b/lib/diameter/src/base/diameter_misc_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -35,6 +35,7 @@
diameter_stats, %% statistics counter management
diameter_reg, %% service/property publishing
diameter_peer, %% remote peer manager
+ diameter_dist, %% request distribution
diameter_config]). %% configuration/restart
%% start_link/0
diff --git a/lib/diameter/src/base/diameter_traffic.erl b/lib/diameter/src/base/diameter_traffic.erl
index 2d3e4a2ac9..8423e30269 100644
--- a/lib/diameter/src/base/diameter_traffic.erl
+++ b/lib/diameter/src/base/diameter_traffic.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2013-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2013-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -42,8 +42,12 @@
peer_up/1,
peer_down/1]).
+%% towards diameter_dist
+-export([request_info/1]).
+
%% internal
-export([send/1, %% send from remote node
+ request/1, %% process request in handler process
init/1]). %% monitor process start
-include_lib("diameter/include/diameter.hrl").
@@ -232,7 +236,7 @@ incr_rc(Dir, Pkt, TPid, MsgDict, AppDict, Dict0) ->
-spec receive_message(pid(), Route, #diameter_packet{}, module(), RecvData)
-> pid() %% request handler
| boolean() %% answer, known request or not
- | discard %% request discarded by MFA
+ | discard %% request discarded
when Route :: {Handler, RequestRef, TPid}
| Ack,
RecvData :: {[SpawnOpt], #recvdata{}},
@@ -252,7 +256,8 @@ receive_message(TPid, Route, Pkt, Dict0, RecvData) ->
recv(true, Ack, TPid, Pkt, Dict0, T)
when is_boolean(Ack) ->
{Opts, RecvData} = T,
- spawn_request(Ack, TPid, Pkt, Dict0, RecvData, Opts);
+ AppT = find_app(TPid, Pkt, RecvData),
+ ack(Ack, TPid, spawn_request(AppT, Opts, Ack, TPid, Pkt, Dict0, RecvData));
%% ... answer to known request ...
recv(false, {Pid, Ref, TPid}, _, Pkt, Dict0, _) ->
@@ -274,58 +279,73 @@ recv(false, false, TPid, Pkt, _, _) ->
incr(TPid, {{unknown, 0}, recv, discarded}),
false.
-%% spawn_request/6
+%% spawn_request/7
+
+spawn_request(false, _, _, _, _, _, _) -> %% no transport
+ discard;
-%% An MFA should return a pid() or the atom 'discard'. The latter
-%% results in an acknowledgment back to the transport process when
-%% appropriate, to ensure that send/recv callbacks can count
-%% outstanding requests. Acknowledgement is implicit if the
+%% An MFA should return the pid() of a process in which the argument
+%% fun is applied, or the atom 'discard' if the fun is not applied.
+%% The latter results in an acknowledgment back to the transport
+%% process when appropriate, to ensure that send/recv callbacks can
+%% count outstanding requests. Acknowledgement is implicit if the
%% handler process dies (in a handle_request callback for example).
-spawn_request(Ack, TPid, Pkt, Dict0, RecvData, {M,F,A}) ->
- ReqF = fun() ->
- ack(Ack, TPid, recv_request(Ack, TPid, Pkt, Dict0, RecvData))
- end,
- ack(Ack, TPid, apply(M, F, [ReqF | A]));
+spawn_request(AppT, {M,F,A}, Ack, TPid, Pkt, Dict0, RecvData) ->
+ %% Term to pass to request/1 in an appropriate process. Module
+ %% diameter_dist implements callbacks.
+ ReqT = {Pkt, AppT, Ack, TPid, Dict0, RecvData},
+ apply(M, F, [ReqT | A]);
%% A spawned process acks implicitly when it dies, so there's no need
%% to handle 'discard'.
-spawn_request(Ack, TPid, Pkt, Dict0, RecvData, Opts) ->
+spawn_request(AppT, Opts, Ack, TPid, Pkt, Dict0, RecvData) ->
spawn_opt(fun() ->
- recv_request(Ack, TPid, Pkt, Dict0, RecvData)
+ recv_request(Ack, TPid, Pkt, Dict0, RecvData, AppT)
end,
Opts).
+%% request_info/1
+%%
+%% Limited request information for diameter_dist.
+
+request_info({Pkt, _AppT, _Ack, _TPid, _Dict0, RecvData} = _ReqT) ->
+ {RecvData#recvdata.service_name, Pkt#diameter_packet.bin}.
+
+%% request/1
+%%
+%% Called from a handler process chosen by a transport spawn_opt MFA
+%% to process an incoming request.
+
+request({Pkt, AppT, Ack, TPid, Dict0, RecvData} = _ReqT) ->
+ ack(Ack, TPid, recv_request(Ack, TPid, Pkt, Dict0, RecvData, AppT)).
+
%% ack/3
ack(Ack, TPid, RC) ->
- RC == discard andalso Ack andalso (TPid ! {send, false}),
+ RC == discard
+ andalso Ack
+ andalso (TPid ! {send, false}),
RC.
%% ---------------------------------------------------------------------------
-%% recv_request/5
+%% recv_request/6
%% ---------------------------------------------------------------------------
-spec recv_request(Ack :: boolean(),
TPid :: pid(),
#diameter_packet{},
Dict0 :: module(),
- #recvdata{})
+ #recvdata{},
+ AppT :: {#diameter_app{}, #diameter_caps{}}
+ | #diameter_caps{}) %% no suitable app
-> ok %% answer was sent
- | discard %% or not
- | false. %% no transport
-
-recv_request(Ack,
- TPid,
- #diameter_packet{header = #diameter_header{application_id = Id}}
- = Pkt,
- Dict0,
- #recvdata{peerT = PeerT,
- apps = Apps,
- counters = Count}
- = RecvData) ->
+ | discard. %% or not
+
+recv_request(Ack, TPid, Pkt, Dict0, RecvData, AppT) ->
Ack andalso (TPid ! {handler, self()}),
- case diameter_service:find_incoming_app(PeerT, TPid, Id, Apps) of
+ case AppT of
{#diameter_app{id = Aid, dictionary = AppDict} = App, Caps} ->
+ Count = RecvData#recvdata.counters,
Count andalso incr(recv, Pkt, TPid, AppDict),
DecPkt = decode(Aid, AppDict, RecvData, Pkt),
Count andalso incr_error(recv, DecPkt, TPid, AppDict),
@@ -349,11 +369,20 @@ recv_request(Ack,
Dict0,
RecvData,
DecPkt,
- [[]]);
- false = No -> %% transport has gone down
- No
+ [[]])
end.
+%% find_app/3
+%%
+%% Lookup the application of a received Diameter request on the node
+%% on which it's received.
+
+find_app(TPid,
+ #diameter_packet{header = #diameter_header{application_id = Id}},
+ #recvdata{peerT = PeerT,
+ apps = Apps}) ->
+ diameter_service:find_incoming_app(PeerT, TPid, Id, Apps).
+
%% decode/4
decode(Id, Dict, #recvdata{codec = Opts}, Pkt) ->
diff --git a/lib/diameter/src/diameter.appup.src b/lib/diameter/src/diameter.appup.src
index 4e6b983bac..52263633fb 100644
--- a/lib/diameter/src/diameter.appup.src
+++ b/lib/diameter/src/diameter.appup.src
@@ -60,7 +60,8 @@
{"2.1.3", [{restart_application, diameter}]}, %% 20.2
{"2.1.4", [{restart_application, diameter}]}, %% 20.3
{"2.1.4.1", [{restart_application, diameter}]}, %% 20.3.8.19
- {"2.1.5", [{update, diameter_peer_fsm}]} %% 21.0
+ {"2.1.5", [{restart_application, diameter}]}, %% 21.0
+ {"2.1.6", [{restart_application, diameter}]} %% 21.1
],
[
{"0.9", [{restart_application, diameter}]},
@@ -102,6 +103,7 @@
{"2.1.3", [{restart_application, diameter}]},
{"2.1.4", [{restart_application, diameter}]},
{"2.1.4.1", [{restart_application, diameter}]},
- {"2.1.5", [{update, diameter_peer_fsm}]}
+ {"2.1.5", [{restart_application, diameter}]},
+ {"2.1.6", [{restart_application, diameter}]}
]
}.
diff --git a/lib/diameter/src/modules.mk b/lib/diameter/src/modules.mk
index bb86de016a..d16292bb88 100644
--- a/lib/diameter/src/modules.mk
+++ b/lib/diameter/src/modules.mk
@@ -1,7 +1,7 @@
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2010-2017. All Rights Reserved.
+# Copyright Ericsson AB 2010-2019. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -40,6 +40,7 @@ RT_MODULES = \
base/diameter_config \
base/diameter_config_sup \
base/diameter_codec \
+ base/diameter_dist \
base/diameter_gen \
base/diameter_lib \
base/diameter_misc_sup \
diff --git a/lib/diameter/test/diameter_dist_SUITE.erl b/lib/diameter/test/diameter_dist_SUITE.erl
new file mode 100644
index 0000000000..b2e4c35b9a
--- /dev/null
+++ b/lib/diameter/test/diameter_dist_SUITE.erl
@@ -0,0 +1,332 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2019. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Tests of traffic between two Diameter nodes, the server being
+%% spread across three Erlang nodes.
+%%
+
+-module(diameter_dist_SUITE).
+
+-export([suite/0,
+ all/0]).
+
+%% testcases
+-export([enslave/1, enslave/0,
+ ping/1,
+ start/1,
+ connect/1,
+ send/1,
+ stop/1, stop/0]).
+
+%% diameter callbacks
+-export([peer_up/3,
+ peer_down/3,
+ pick_peer/4,
+ prepare_request/3,
+ prepare_retransmit/3,
+ handle_answer/4,
+ handle_error/4,
+ handle_request/3]).
+
+-export([call/1]).
+
+-include("diameter.hrl").
+-include("diameter_gen_base_rfc6733.hrl").
+
+%% ===========================================================================
+
+-define(util, diameter_util).
+
+-define(CLIENT, 'CLIENT').
+-define(SERVER, 'SERVER').
+-define(REALM, "erlang.org").
+-define(DICT, diameter_gen_base_rfc6733).
+-define(ADDR, {127,0,0,1}).
+
+%% Config for diameter:start_service/2.
+-define(SERVICE(Host),
+ [{'Origin-Host', Host ++ [$.|?REALM]},
+ {'Origin-Realm', ?REALM},
+ {'Host-IP-Address', [?ADDR]},
+ {'Vendor-Id', 12345},
+ {'Product-Name', "OTP/diameter"},
+ {'Auth-Application-Id', [?DICT:id()]},
+ {'Origin-State-Id', origin()},
+ {spawn_opt, {diameter_dist, route_session, [#{id => []}]}},
+ {sequence, fun sequence/0},
+ {string_decode, false},
+ {application, [{dictionary, ?DICT},
+ {module, ?MODULE},
+ {request_errors, callback},
+ {answer_errors, callback}]}]).
+
+-define(SUCCESS, 2001).
+-define(BUSY, 3004).
+-define(LOGOUT, ?'DIAMETER_BASE_TERMINATION-CAUSE_LOGOUT').
+-define(MOVED, ?'DIAMETER_BASE_TERMINATION-CAUSE_USER_MOVED').
+-define(TIMEOUT, ?'DIAMETER_BASE_TERMINATION-CAUSE_SESSION_TIMEOUT').
+
+-define(L, atom_to_list).
+-define(A, list_to_atom).
+
+%% The order here is significant and causes the server to listen
+%% before the client connects. The server listens on the first node,
+%% and distributes requests to the other two.
+-define(NODES, [{server0, ?SERVER},
+ {server1, ?SERVER},
+ {server2, ?SERVER},
+ {client, ?CLIENT}]).
+
+%% Options to ct_slave:start/2.
+-define(TIMEOUTS, [{T, 15000} || T <- [boot_timeout,
+ init_timeout,
+ start_timeout]]).
+
+%% ===========================================================================
+
+suite() ->
+ [{timetrap, {seconds, 60}}].
+
+all() ->
+ [enslave,
+ ping,
+ start,
+ connect,
+ send,
+ stop].
+
+%% ===========================================================================
+%% start/stop testcases
+
+%% enslave/1
+%%
+%% Start four slave nodes, three to implement a Diameter server,
+%% one to implement a client.
+
+enslave() ->
+ [{timetrap, {seconds, 30*length(?NODES)}}].
+
+enslave(Config) ->
+ Here = filename:dirname(code:which(?MODULE)),
+ Ebin = filename:join([Here, "..", "ebin"]),
+ Dirs = [Here, Ebin],
+ Nodes = [{N,S} || {M,S} <- ?NODES, N <- [slave(M, Dirs)]],
+ ?util:write_priv(Config, nodes, [{N,S} || {{N,ok},S} <- Nodes]),
+ [] = [{T,S} || {{_,E} = T, S} <- Nodes, E /= ok].
+
+slave(Name, Dirs) ->
+ add_pathsa(Dirs, ct_slave:start(Name, ?TIMEOUTS)).
+
+add_pathsa(Dirs, {ok, Node}) ->
+ {Node, rpc:call(Node, code, add_pathsa, [Dirs])};
+add_pathsa(_, No) ->
+ {No, error}.
+
+%% ping/1
+%%
+%% Ensure the server nodes are connected so that diameter_dist can attach.
+
+ping({S, Nodes}) ->
+ ?SERVER = S,
+ [N || {N,_} <- Nodes,
+ node() /= N,
+ pang <- [net_adm:ping(N)]];
+
+ping(Config) ->
+ Nodes = lists:droplast(?util:read_priv(Config, nodes)),
+ [] = [{N,RC} || {N,S} <- Nodes,
+ RC <- [rpc:call(N, ?MODULE, ping, [{S,Nodes}])],
+ RC /= []].
+
+%% start/1
+%%
+%% Start diameter services.
+
+start(SvcName)
+ when is_atom(SvcName) ->
+ ok = diameter:start(),
+ ok = diameter:start_service(SvcName, ?SERVICE((?L(SvcName))));
+
+start(Config) ->
+ Nodes = ?util:read_priv(Config, nodes),
+ [] = [{N,RC} || {N,S} <- Nodes,
+ RC <- [rpc:call(N, ?MODULE, start, [S])],
+ RC /= ok].
+
+sequence() ->
+ sequence(sname()).
+
+sequence(client) ->
+ {0,32};
+sequence(Server) ->
+ "server" ++ N = ?L(Server),
+ {list_to_integer(N), 30}.
+
+origin() ->
+ origin(sname()).
+
+origin(client) ->
+ 99;
+origin(Server) ->
+ "server" ++ N = ?L(Server),
+ list_to_integer(N).
+
+%% connect/1
+%%
+%% Establish one connection from the client, terminated on the first
+%% server node, the others handling requests.
+
+connect({?SERVER, Config, [{Node, _} | _]}) ->
+ if Node == node() -> %% server0
+ ?util:write_priv(Config, lref, {Node, ?util:listen(?SERVER, tcp)});
+ true ->
+ diameter_dist:attach([?SERVER])
+ end,
+ ok;
+
+connect({?CLIENT, Config, _}) ->
+ ?util:connect(?CLIENT, tcp, ?util:read_priv(Config, lref)),
+ ok;
+
+connect(Config) ->
+ Nodes = ?util:read_priv(Config, nodes),
+ [] = [{N,RC} || {N,S} <- Nodes,
+ RC <- [rpc:call(N, ?MODULE, connect, [{S, Config, Nodes}])],
+ RC /= ok].
+
+%% stop/1
+%%
+%% Stop the slave nodes.
+
+stop() ->
+ [{timetrap, {seconds, 30*length(?NODES)}}].
+
+stop(_Config) ->
+ [] = [{N,E} || {N,_} <- ?NODES,
+ {error, _, _} = E <- [ct_slave:stop(N)]].
+
+%% ===========================================================================
+%% traffic testcases
+
+%% send/1
+%%
+%% Send 100 requests and ensure the node name sent as User-Name isn't
+%% that of the node terminating the transport.
+
+send(Config) ->
+ send(Config, 100, dict:new()).
+
+%% send/3
+
+send(Config, 0, Dict) ->
+    [{Server0, _} | _] = ?util:read_priv(Config, nodes),
+ Node = atom_to_binary(Server0, utf8),
+ {false, _} = {dict:is_key(Node, Dict), dict:to_list(Dict)};
+
+send(Config, N, Dict) ->
+ #diameter_base_STA{'Result-Code' = ?SUCCESS,
+ 'User-Name' = [ServerNode]}
+ = send(Config, str(?LOGOUT)),
+ true = is_binary(ServerNode),
+ send(Config, N-1, dict:update_counter(ServerNode, 1, Dict)).
+
+%% ===========================================================================
+
+str(Cause) ->
+ #diameter_base_STR{'Destination-Realm' = ?REALM,
+ 'Auth-Application-Id' = ?DICT:id(),
+ 'Termination-Cause' = Cause}.
+
+%% send/2
+
+send(Config, Req) ->
+ {Node, _} = lists:last(?util:read_priv(Config, nodes)),
+ rpc:call(Node, ?MODULE, call, [Req]).
+
+%% call/1
+
+call(Req) ->
+ diameter:call(?CLIENT, ?DICT, Req, []).
+
+%% sname/0
+
+sname() ->
+ ?A(hd(string:tokens(?L(node()), "@"))).
+
+%% ===========================================================================
+%% diameter callbacks
+
+%% peer_up/3
+
+peer_up(_SvcName, _Peer, State) ->
+ State.
+
+%% peer_down/3
+
+peer_down(_SvcName, _Peer, State) ->
+ State.
+
+%% pick_peer/4
+
+pick_peer([Peer], [], ?CLIENT, _State) ->
+ {ok, Peer}.
+
+%% prepare_request/3
+
+prepare_request(Pkt, ?CLIENT, {_Ref, Caps}) ->
+ #diameter_packet{msg = Req}
+ = Pkt,
+ #diameter_caps{origin_host = {OH, _},
+ origin_realm = {OR, _}}
+ = Caps,
+ {send, Req#diameter_base_STR{'Origin-Host' = OH,
+ 'Origin-Realm' = OR,
+ 'Session-Id' = diameter:session_id(OH)}}.
+
+%% prepare_retransmit/3
+
+prepare_retransmit(_, ?CLIENT, _) ->
+ discard.
+
+%% handle_answer/4
+
+handle_answer(Pkt, _Req, ?CLIENT, _Peer) ->
+ #diameter_packet{msg = Rec, errors = []} = Pkt,
+ Rec.
+
+%% handle_error/4
+
+handle_error(Reason, _Req, ?CLIENT, _Peer) ->
+ {error, Reason}.
+
+%% handle_request/3
+
+handle_request(Pkt, ?SERVER, {_, Caps}) ->
+ #diameter_packet{msg = #diameter_base_STR{'Session-Id' = SId}}
+ = Pkt,
+ #diameter_caps{origin_host = {OH, _},
+ origin_realm = {OR, _}}
+ = Caps,
+ {reply, #diameter_base_STA{'Result-Code' = ?SUCCESS,
+ 'Session-Id' = SId,
+ 'Origin-Host' = OH,
+ 'Origin-Realm' = OR,
+ 'User-Name' = [atom_to_binary(node(), utf8)]}}.
diff --git a/lib/diameter/test/diameter_distribution_SUITE.erl b/lib/diameter/test/diameter_distribution_SUITE.erl
index 5146f68ff1..5fe02284ae 100644
--- a/lib/diameter/test/diameter_distribution_SUITE.erl
+++ b/lib/diameter/test/diameter_distribution_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2013-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2013-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -76,6 +76,7 @@
{share_peers, peers()},
{use_shared_peers, peers()},
{restrict_connections, false},
+ {spawn_opt, {diameter_dist, spawn_local, []}},
{sequence, fun sequence/0},
{application, [{dictionary, ?DICT},
{module, ?MODULE},
@@ -125,7 +126,7 @@ all() ->
%% enslave/1
%%
%% Start four slave nodes, one to implement a Diameter server,
-%% two three to implement a client.
+%% three to implement a client.
enslave() ->
[{timetrap, {seconds, 30*length(?NODES)}}].
@@ -331,6 +332,8 @@ prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, {_, client0}) ->
'Origin-Realm' = OR,
'Session-Id' = diameter:session_id(OH)}}.
+%% prepare_retransmit/4
+
prepare_retransmit(Pkt, ?CLIENT, _, {_, client0}) ->
#diameter_packet{msg = #diameter_base_STR{'Termination-Cause' = ?MOVED}}
= Pkt, %% assert
diff --git a/lib/diameter/test/diameter_pool_SUITE.erl b/lib/diameter/test/diameter_pool_SUITE.erl
index 97c16940ff..a36a4fa17a 100644
--- a/lib/diameter/test/diameter_pool_SUITE.erl
+++ b/lib/diameter/test/diameter_pool_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2015-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2015-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -51,6 +51,7 @@
{'Auth-Application-Id', [0]}, %% common
{'Acct-Application-Id', [3]}, %% accounting
{restrict_connections, false},
+ {spawn_opt, {diameter_dist, route_session, []}},
{application, [{alias, common},
{dictionary, diameter_gen_base_rfc6733},
{module, diameter_callback}]},
diff --git a/lib/diameter/test/diameter_traffic_SUITE.erl b/lib/diameter/test/diameter_traffic_SUITE.erl
index 434aef01dd..47b00c25a2 100644
--- a/lib/diameter/test/diameter_traffic_SUITE.erl
+++ b/lib/diameter/test/diameter_traffic_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -539,8 +539,7 @@ add_transports(Config) ->
++ [{unordered, unordered()} || T == sctp],
[{capabilities_cb, fun capx/2},
{pool_size, 8}
- | server_apps()]
- ++ [{spawn_opt, {erlang, spawn, []}} || CS]),
+ | server_apps()]),
Cs = [?util:connect(CN,
[T, {sender, CS} | client_opts(T)],
LRef,
diff --git a/lib/diameter/test/modules.mk b/lib/diameter/test/modules.mk
index 0c73adca12..90b0a25d5f 100644
--- a/lib/diameter/test/modules.mk
+++ b/lib/diameter/test/modules.mk
@@ -1,7 +1,7 @@
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2010-2017. All Rights Reserved.
+# Copyright Ericsson AB 2010-2019. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,6 +31,7 @@ MODULES = \
diameter_codec_test \
diameter_config_SUITE \
diameter_compiler_SUITE \
+ diameter_dist_SUITE \
diameter_distribution_SUITE \
diameter_dpr_SUITE \
diameter_event_SUITE \
diff --git a/lib/ftp/test/ftp_SUITE.erl b/lib/ftp/test/ftp_SUITE.erl
index 81147b5821..0b070ee8cb 100644
--- a/lib/ftp/test/ftp_SUITE.erl
+++ b/lib/ftp/test/ftp_SUITE.erl
@@ -96,6 +96,7 @@ ftp_tests()->
recv_chunk,
recv_chunk_twice,
recv_chunk_three_times,
+ recv_chunk_delay,
type,
quote,
error_elogin,
@@ -732,21 +733,28 @@ recv_chunk(Pid, Acc) ->
Error
end.
-%% Make new test case that uses this or new code
-%% only test one thing at the time
-%% delay_recv_chunk(Pid) ->
-%% delay_recv_chunk(Pid, <<>>).
-%% delay_recv_chunk(Pid, Acc) ->
-%% ct:pal("FOO ~p", [byte_size(Acc)]),
-%% case ftp:recv_chunk(Pid) of
-%% ok ->
-%% {ok, Acc};
-%% {ok, Bin} ->
-%% ct:sleep(100),
-%% delay_recv_chunk(Pid, <<Acc/binary, Bin/binary>>);
-%% Error ->
-%% Error
-%% end.
+recv_chunk_delay(Config0) when is_list(Config0) ->
+ File1 = "big_file1.txt",
+ Contents = list_to_binary(lists:duplicate(1000, lists:seq(0,255))),
+ Config = set_state([reset, {mkfile,File1,Contents}], Config0),
+ Pid = proplists:get_value(ftp, Config),
+ ok = ftp:recv_chunk_start(Pid, id2ftp(File1,Config)),
+ {ok, ReceivedContents} = delay_recv_chunk(Pid),
+ find_diff(ReceivedContents, Contents).
+
+delay_recv_chunk(Pid) ->
+ delay_recv_chunk(Pid, <<>>).
+delay_recv_chunk(Pid, Acc) ->
+    ct:pal("Received size ~p", [byte_size(Acc)]),
+ case ftp:recv_chunk(Pid) of
+ ok ->
+ {ok, Acc};
+ {ok, Bin} ->
+ ct:sleep(100),
+ delay_recv_chunk(Pid, <<Acc/binary, Bin/binary>>);
+ Error ->
+ Error
+ end.
%%-------------------------------------------------------------------------
type() ->
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 15dbdb47dc..7f9609d5c1 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -369,6 +369,14 @@ MaxT = TickTime + TickTime / 4</code>
performed. This option ensures that <c>global</c> is
synchronized.</p>
</item>
+ <tag><c>start_distribution = true | false</c></tag>
+ <item>
+      <p>Starts all distribution services, such as <c>rpc</c>,
+      <c>global</c>, and <c>net_kernel</c>, if the parameter is
+      <c>true</c>. Set this parameter to <c>false</c>
+      for systems that want to disable all distribution functionality.</p>
+ <p>Defaults to <c>true</c>.</p>
+ </item>
<tag><c>start_dist_ac = true | false</c></tag>
<item>
<p>Starts the <c>dist_ac</c> server if the parameter is
@@ -510,11 +518,13 @@ MaxT = TickTime + TickTime / 4</code>
parameters for Logger are not set.</p>
<taglist>
<tag><c>error_logger</c></tag>
- <item>Replaced by setting the type of the default
- <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso>
- to the same value. Example:
+ <item>Replaced by setting the <seealso
+ marker="logger_std_h#type"><c>type</c></seealso>, and possibly
+ <seealso marker="logger_std_h#file"><c>file</c></seealso> and
+ <seealso marker="logger_std_h#modes"><c>modes</c></seealso>
+ parameters of the default <c>logger_std_h</c> handler. Example:
<code type="none">
-erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{type=>{file,"/tmp/erlang.log"}}}}]'
+erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{file=>"/tmp/erlang.log"}}}]'
</code>
</item>
<tag><c>error_logger_format_depth</c></tag>
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index e6448e144e..ebebcaa1ae 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -66,7 +66,7 @@ logger:error("error happened because: ~p", [Reason]). % Without macro
[{kernel,
[{logger,
[{handler, default, logger_std_h,
- #{config => #{type => {file, "path/to/file.log"}}}}]}]}].
+ #{config => #{file => "path/to/file.log"}}}]}]}].
</code>
<p>
For more information about:
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 03b9edcf8f..8458ffa042 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -801,7 +801,7 @@ logger:debug(#{got => connection_request, id => Id, state => State},
[{kernel,
[{logger,
[{handler, default, logger_std_h, % {handler, HandlerId, Module,
- #{config => #{type => {file,"log/erlang.log"}}}} % Config}
+ #{config => #{file => "log/erlang.log"}}} % Config}
]}]}].
</code>
<p>Modify the default handler to print each log event as a
@@ -831,10 +831,10 @@ logger:debug(#{got => connection_request, id => Id, state => State},
[{logger,
[{handler, default, logger_std_h,
#{level => error,
- config => #{type => {file, "log/erlang.log"}}}},
+ config => #{file => "log/erlang.log"}}},
{handler, info, logger_std_h,
#{level => debug,
- config => #{type => {file, "log/debug.log"}}}}
+ config => #{file => "log/debug.log"}}}
]}]}].
</code>
</section>
@@ -1004,10 +1004,10 @@ ok</pre>
<p>Then, add a new handler which prints to file. You can use the
handler
module <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>,
- and specify type <c>{file,File}</c>.:</p>
+ and configure it to log to file:</p>
<pre>
-4> <input>Config = #{config => #{type => {file,"./info.log"}}, level => info}.</input>
-#{config => #{type => {file,"./info.log"}},level => info}
+4> <input>Config = #{config => #{file => "./info.log"}, level => info}.</input>
+#{config => #{file => "./info.log"},level => info}
5> <input>logger:add_handler(myhandler, logger_std_h, Config).</input>
ok</pre>
<p>Since <c>filter_default</c> defaults to <c>log</c>, this
@@ -1246,7 +1246,7 @@ do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) ->
<p>A configuration example:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{config => #{type => {file,"./system_info.log"},
+ #{config => #{file => "./system_info.log",
sync_mode_qlen => 100,
drop_mode_qlen => 1000,
flush_qlen => 2000}}).
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index fcd180abd6..5ed1a2f210 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -55,30 +55,105 @@
is stored in a sub map with the key <c>config</c>, and can contain the
following parameters:</p>
<taglist>
- <tag><marker id="type"/><c>type</c></tag>
+ <tag><marker id="type"/><c>type = standard_io | standard_error | file</c></tag>
<item>
- <p>This has the value <c>standard_io</c>, <c>standard_error</c>,
- <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>.</p>
- <p>If <c>LogFileOpts</c> is specified, it replaces the default
- list of options used when opening the log file. The default
- list is <c>[raw,append,delayed_write]</c>. One reason to do
- so can be to change <c>append</c> to, for
- example, <c>write</c>, ensuring that the old log is
- truncated when a node is restarted. See the reference manual
- for <seealso marker="file#open-2"><c>file:open/2</c></seealso>
- for more information about file options.</p>
+ <p>Specifies the log destination.</p>
+      <p>The value is set when the handler is added, and it cannot
+      be changed at runtime.</p>
+ <p>Defaults to <c>standard_io</c>, unless
+ parameter <seealso marker="#file"><c>file</c></seealso> is
+ given, in which case it defaults to <c>file</c>.</p>
+ </item>
+ <tag><marker id="file"/><c>file = </c><seealso marker="file#type-filename"><c>file:filename()</c></seealso></tag>
+ <item>
+ <p>This specifies the name of the log file when the handler is
+ of type <c>file</c>.</p>
+      <p>The value is set when the handler is added, and it cannot
+      be changed at runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
+ </item>
+ <tag><marker id="modes"/><c>modes = [</c><seealso marker="file#type-mode"><c>file:mode()</c></seealso><c>]</c></tag>
+ <item>
+ <p>This specifies the file modes to use when opening the log
+ file,
+ see <seealso marker="file#open-2"><c>file:open/2</c></seealso>.
+ If <c>modes</c> are not specified, the default list used
+ is <c>[raw,append,delayed_write]</c>. If <c>modes</c> are
+ specified, the list replaces the default modes list with the
+ following adjustments:</p>
+ <list>
+ <item>
+ If <c>raw</c> is not found in the list, it is added.
+ </item>
+ <item>
+ If none of <c>write</c>, <c>append</c> or <c>exclusive</c> is
+ found in the list, <c>append</c> is added.</item>
+ <item>If none of <c>delayed_write</c>
+ or <c>{delayed_write,Size,Delay}</c> is found in the
+ list, <c>delayed_write</c> is added.</item>
+ </list>
<p>Log files are always UTF-8 encoded. The encoding can not be
- changed by setting the option <c>{encoding,Encoding}</c>
- in <c>LogFileOpts</c>.</p>
- <p>Notice that the standard handler does not have support for
- circular logging. Use the disk_log handler,
- <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
- for this.</p>
+ changed by setting the mode <c>{encoding,Encoding}</c>.</p>
<p>The value is set when the handler is added, and it can not
be changed in runtime.</p>
- <p>Defaults to <c>standard_io</c>.</p>
+ <p>Defaults to <c>[raw,append,delayed_write]</c>.</p>
+ </item>
+ <tag><marker id="max_no_bytes"/><c>max_no_bytes = pos_integer() | infinity</c></tag>
+ <item>
+      <p>This parameter specifies whether the log file should be
+      rotated. The value <c>infinity</c> means the log file will
+ grow indefinitely, while an integer value specifies at which
+ file size (bytes) the file is rotated.</p>
+ <p>Defaults to <c>infinity</c>.</p>
+ </item>
+ <tag><marker id="max_no_files"/><c>max_no_files = non_neg_integer()</c></tag>
+ <item>
+ <p>This parameter specifies the number of rotated log file
+ archives to keep. This has meaning only
+ if <seealso marker="#max_no_bytes"><c>max_no_bytes</c></seealso>
+ is set to an integer value.</p>
+ <p>The log archives are
+ named <c>FileName.0</c>, <c>FileName.1</c>,
+ ... <c>FileName.N</c>, where <c>FileName</c> is the name of
+ the current log file. <c>FileName.0</c> is the newest of the
+ archives. The maximum value for <c>N</c> is the value
+ of <c>max_no_files</c> minus 1.</p>
+      <p>Notice that setting this value to <c>0</c> does not turn off
+      rotation. It only specifies that no archives are kept.</p>
+ <p>Defaults to <c>0</c>.</p>
+ </item>
+ <tag><marker id="compress_on_rotate"/><c>compress_on_rotate = boolean()</c></tag>
+ <item>
+      <p>This parameter specifies whether the rotated log file archives
+      shall be compressed. If set to <c>true</c>, all
+      archives are compressed with <c>gzip</c>, and renamed
+      to <c>FileName.N.gz</c>.</p>
+ <p><c>compress_on_rotate</c> has no meaning if <seealso
+ marker="#max_no_bytes"><c>max_no_bytes</c></seealso> has the
+ value <c>infinity</c>.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><marker id="file_check"/><c>file_check = non_neg_integer()</c></tag>
+ <item>
+ <p>When <c>logger_std_h</c> logs to a file, it reads the file
+ information of the log file prior to each write
+ operation. This is to make sure the file still exists and
+ has the same inode as when it was opened. This implies some
+ performance loss, but ensures that no log events are lost in
+ the case when the file has been removed or renamed by an
+ external actor.</p>
+      <p>To allow minimizing this performance loss, the
+ <c>file_check</c> parameter can be set to a positive integer
+ value, <c>N</c>. The handler will then skip reading the file
+ information prior to writing, as long as no more
+ than <c>N</c> milliseconds have passed since it was last
+ read.</p>
+      <p>Notice that the risk of losing log events grows when
+      the <c>file_check</c> value grows.</p>
+      <p>Defaults to <c>0</c>.</p>
</item>
- <tag><c>filesync_repeat_interval</c></tag>
+ <tag><c>filesync_repeat_interval = pos_integer() | no_repeat</c></tag>
<item>
<p>This value, in milliseconds, specifies how often the handler does
a file sync operation to write buffered data to disk. The handler attempts
@@ -97,12 +172,13 @@
standard handler and the disk_log handler, and are documented in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
</seealso>.</p>
- <p>Notice that if changing the configuration of the handler in runtime,
- the <c>type</c> parameter must not be modified.</p>
+    <p>Notice that when changing the configuration of the handler at
+      runtime, the <c>type</c>, <c>file</c>, or <c>modes</c> parameters
+      must not be modified.</p>
<p>Example of adding a standard handler:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{config => #{type => {file,"./system_info.log"},
+ #{config => #{file => "./system_info.log",
filesync_repeat_interval => 1000}}).
</code>
<p>To set the default handler, that starts initially with
@@ -110,7 +186,7 @@ logger:add_handler(my_standard_h, logger_std_h,
change the Kernel default logger configuration. Example:</p>
<code type="none">
erl -kernel logger '[{handler,default,logger_std_h,
- #{config => #{type => {file,"./log.log"}}}}]'
+ #{config => #{file => "./log.log"}}}]'
</code>
<p>An example of how to replace the standard handler with a disk_log handler
at startup is found in the
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index c68d04e279..111d103df2 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -145,26 +145,11 @@ init([]) ->
{ok, {SupFlags,
[Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}};
_ ->
- Rpc = #{id => rex,
- start => {rpc, start_link, []},
- restart => permanent,
- shutdown => 2000,
- type => worker,
- modules => [rpc]},
-
- Global = #{id => global_name_server,
- start => {global, start_link, []},
- restart => permanent,
- shutdown => 2000,
- type => worker,
- modules => [global]},
-
- GlGroup = #{id => global_group,
- start => {global_group,start_link,[]},
- restart => permanent,
- shutdown => 2000,
- type => worker,
- modules => [global_group]},
+ DistChildren =
+ case application:get_env(kernel, start_distribution) of
+ {ok, false} -> [];
+ _ -> start_distribution()
+ end,
InetDb = #{id => inet_db,
start => {inet_db, start_link, []},
@@ -173,13 +158,6 @@ init([]) ->
type => worker,
modules => [inet_db]},
- NetSup = #{id => net_sup,
- start => {erl_distribution, start_link, []},
- restart => permanent,
- shutdown => infinity,
- type => supervisor,
- modules => [erl_distribution]},
-
SigSrv = #{id => erl_signal_server,
start => {gen_event, start_link, [{local, erl_signal_server}]},
restart => permanent,
@@ -187,14 +165,11 @@ init([]) ->
type => worker,
modules => dynamic},
- DistAC = start_dist_ac(),
-
Timer = start_timer(),
{ok, {SupFlags,
- [Code, Rpc, Global, InetDb | DistAC] ++
- [NetSup, GlGroup, File, SigSrv,
- StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
+ [Code, InetDb | DistChildren] ++
+ [File, SigSrv, StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
end;
init(safe) ->
SupFlags = #{strategy => one_for_one,
@@ -213,6 +188,39 @@ init(safe) ->
{ok, {SupFlags, Boot ++ DiskLog ++ Pg2}}.
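+
+%% start_distribution/0
+%%
+%% Child specs for the distribution services. These are skipped when
+%% the kernel parameter start_distribution is false, which can be set,
+%% for example, in sys.config (illustrative entry):
+%%
+%%   [{kernel, [{start_distribution, false}]}].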
+start_distribution() ->
+ Rpc = #{id => rex,
+ start => {rpc, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [rpc]},
+
+ Global = #{id => global_name_server,
+ start => {global, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [global]},
+
+ DistAC = start_dist_ac(),
+
+ NetSup = #{id => net_sup,
+ start => {erl_distribution, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [erl_distribution]},
+
+ GlGroup = #{id => global_group,
+ start => {global_group,start_link,[]},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [global_group]},
+
+ [Rpc, Global | DistAC] ++ [NetSup, GlGroup].
+
start_dist_ac() ->
Spec = [#{id => dist_ac,
start => {dist_ac,start_link,[]},
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 4b5e0a7dd0..16946ff97c 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -142,8 +142,9 @@ changing_config(SetOrUpdate,
maps:with(?OLP_KEYS,NewHConfig0)),
case logger_olp:set_opts(Olp,NewOlpOpts) of
ok ->
- maybe_set_repeated_filesync(Olp,OldCommonConfig,
- NewCommonConfig),
+ logger_olp:cast(Olp, {config_changed,
+ NewCommonConfig,
+ NewHandlerConfig}),
ReadOnly = maps:with(?READ_ONLY_KEYS,OldHConfig),
NewHConfig =
maps:merge(
@@ -281,11 +282,24 @@ handle_cast(repeated_filesync,
State#{handler_state => HS, last_op => sync}
end,
{noreply,set_repeated_filesync(State1)};
-
-handle_cast({set_repeated_filesync,FSyncInt},State) ->
- State1 = State#{filesync_repeat_interval=>FSyncInt},
- State2 = set_repeated_filesync(cancel_repeated_filesync(State1)),
- {noreply, State2}.
+handle_cast({config_changed, CommonConfig, HConfig},
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ filesync_repeat_interval := OldFSyncInt}) ->
+ State1 =
+ case maps:get(filesync_repeat_interval,CommonConfig) of
+ OldFSyncInt ->
+ State;
+ FSyncInt ->
+ set_repeated_filesync(
+ cancel_repeated_filesync(
+ State#{filesync_repeat_interval=>FSyncInt}))
+ end,
+ HS = try Module:config_changed(Name, HConfig, HandlerState)
+ catch error:undef -> HandlerState
+ end,
+ {noreply, State1#{handler_state => HS}}.
handle_info(Info, #{id := Name, module := Module,
handler_state := HandlerState} = State) ->
@@ -447,10 +461,3 @@ cancel_repeated_filesync(State) ->
end.
error_notify(Term) ->
?internal_log(error, Term).
-
-maybe_set_repeated_filesync(_Olp,
- #{filesync_repeat_interval:=FSyncInt},
- #{filesync_repeat_interval:=FSyncInt}) ->
- ok;
-maybe_set_repeated_filesync(Olp,_,#{filesync_repeat_interval:=FSyncInt}) ->
- logger_olp:cast(Olp,{set_repeated_filesync,FSyncInt}).
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 65f5b3876e..c8f1acfca4 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -29,7 +29,7 @@
-export([filesync/1]).
%% logger_h_common callbacks
--export([init/2, check_config/4, reset_state/2,
+-export([init/2, check_config/4, config_changed/3, reset_state/2,
filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
@@ -105,85 +105,169 @@ filter_config(Config) ->
%%%===================================================================
%%% logger_h_common callbacks
%%%===================================================================
-init(Name, #{type := Type}) ->
- case open_log_file(Name, Type) of
+init(Name, Config) ->
+ MyConfig = maps:with([type,file,modes,file_check,max_no_bytes,
+ max_no_files,compress_on_rotate],Config),
+ case file_ctrl_start(Name, MyConfig) of
{ok,FileCtrlPid} ->
- {ok,#{type=>Type,file_ctrl_pid=>FileCtrlPid}};
+ {ok,MyConfig#{file_ctrl_pid=>FileCtrlPid}};
Error ->
Error
end.
-check_config(_Name,set,undefined,NewHConfig) ->
- check_config(maps:merge(get_default_config(),NewHConfig));
-check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
- WriteOnce = maps:with([type],OldHConfig),
+check_config(Name,set,undefined,NewHConfig) ->
+ check_h_config(merge_default_config(Name,normalize_config(NewHConfig)));
+check_config(Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
+ WriteOnce = maps:with([type,file,modes],OldHConfig),
Default =
case SetOrUpdate of
set ->
%% Do not reset write-once fields to defaults
- maps:merge(get_default_config(),WriteOnce);
+ merge_default_config(Name,WriteOnce);
update ->
OldHConfig
end,
- NewHConfig = maps:merge(Default, NewHConfig0),
+ NewHConfig = maps:merge(Default, normalize_config(NewHConfig0)),
%% Fail if write-once fields are changed
- case maps:with([type],NewHConfig) of
+ case maps:with([type,file,modes],NewHConfig) of
WriteOnce ->
- check_config(NewHConfig);
+ check_h_config(NewHConfig);
Other ->
{error,{illegal_config_change,?MODULE,WriteOnce,Other}}
end.
-check_config(#{type:=Type}=HConfig) ->
- case check_h_config(maps:to_list(HConfig)) of
- ok when is_atom(Type) ->
- {ok,HConfig#{filesync_repeat_interval=>no_repeat}};
+check_h_config(HConfig) ->
+ case check_h_config(maps:get(type,HConfig),maps:to_list(HConfig)) of
ok ->
- {ok,HConfig};
+ {ok,fix_file_opts(HConfig)};
{error,{Key,Value}} ->
{error,{invalid_config,?MODULE,#{Key=>Value}}}
end.
-check_h_config([{type,Type} | Config]) when Type == standard_io;
- Type == standard_error ->
- check_h_config(Config);
-check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
- check_h_config(Config);
-check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
- is_list(Modes) ->
- check_h_config(Config);
-check_h_config([Other | _]) ->
+check_h_config(Type,[{type,Type} | Config]) when Type =:= standard_io;
+ Type =:= standard_error;
+ Type =:= file ->
+ check_h_config(Type,Config);
+check_h_config(file,[{file,File} | Config]) when is_list(File) ->
+ check_h_config(file,Config);
+check_h_config(file,[{modes,Modes} | Config]) when is_list(Modes) ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_bytes,Size} | Config])
+ when (is_integer(Size) andalso Size>0) orelse Size=:=infinity ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_files,Num} | Config]) when is_integer(Num), Num>=0 ->
+ check_h_config(file,Config);
+check_h_config(file,[{compress_on_rotate,Bool} | Config]) when is_boolean(Bool) ->
+ check_h_config(file,Config);
+check_h_config(file,[{file_check,FileCheck} | Config])
+ when is_integer(FileCheck), FileCheck>=0 ->
+ check_h_config(file,Config);
+check_h_config(_Type,[Other | _]) ->
{error,Other};
-check_h_config([]) ->
+check_h_config(_Type,[]) ->
ok.
-get_default_config() ->
- #{type => standard_io}.
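+%% For example (illustrative file name), the old-style
+%% #{type => {file,"./log"}} is normalized to
+%% #{type => file, file => "./log"}, so the previous tuple form of the
+%% type parameter keeps working.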
+normalize_config(#{type:={file,File}}=HConfig) ->
+ HConfig#{type=>file,file=>File};
+normalize_config(#{type:={file,File,Modes}}=HConfig) ->
+ HConfig#{type=>file,file=>File,modes=>Modes};
+normalize_config(HConfig) ->
+ HConfig.
+
+merge_default_config(Name,#{type:=Type}=HConfig) ->
+ merge_default_config(Name,Type,HConfig);
+merge_default_config(Name,#{file:=_}=HConfig) ->
+ merge_default_config(Name,file,HConfig);
+merge_default_config(Name,HConfig) ->
+ merge_default_config(Name,standard_io,HConfig).
+
+merge_default_config(Name,Type,HConfig) ->
+ maps:merge(get_default_config(Name,Type),HConfig).
+
+get_default_config(Name,file) ->
+ #{type => file,
+ file => atom_to_list(Name),
+ modes => [raw,append],
+ file_check => 0,
+ max_no_bytes => infinity,
+ max_no_files => 0,
+ compress_on_rotate => false};
+get_default_config(_Name,Type) ->
+ #{type => Type}.
+
+fix_file_opts(#{modes:=Modes}=HConfig) ->
+ HConfig#{modes=>fix_modes(Modes)};
+fix_file_opts(HConfig) ->
+ HConfig#{filesync_repeat_interval=>no_repeat}.
+
+fix_modes(Modes) ->
+ %% Ensure write|append|exclusive
+ Modes1 =
+ case [M || M <- Modes,
+ lists:member(M,[write,append,exclusive])] of
+ [] -> [append|Modes];
+ _ -> Modes
+ end,
+ %% Ensure raw
+ Modes2 =
+ case lists:member(raw,Modes) of
+ false -> [raw|Modes1];
+ true -> Modes1
+ end,
+ %% Ensure delayed_write
+ case lists:partition(fun(delayed_write) -> true;
+ ({delayed_write,_,_}) -> true;
+ (_) -> false
+ end, Modes2) of
+ {[],_} ->
+ [delayed_write|Modes2];
+ _ ->
+ Modes2
+ end.
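+
+%% For example, fix_modes([write]) gives [delayed_write,raw,write],
+%% while the default [raw,append,delayed_write] is returned unchanged.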
-filesync(_Name, _Mode, #{type := Type}=State) when is_atom(Type) ->
- {ok,State};
-filesync(_Name, async, #{file_ctrl_pid := FileCtrlPid} = State) ->
- ok = file_ctrl_filesync_async(FileCtrlPid),
- {ok,State};
-filesync(_Name, sync, #{file_ctrl_pid := FileCtrlPid} = State) ->
- Result = file_ctrl_filesync_sync(FileCtrlPid),
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}=State) ->
+ State;
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_ctrl_pid := FileCtrlPid} = State) ->
+ FileCtrlPid ! {update_config,#{file_check=>FileCheck,
+ max_no_bytes=>Size,
+ max_no_files=>Count,
+ compress_on_rotate=>Compress}},
+ State#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress};
+config_changed(_Name,_NewHConfig,State) ->
+ State.
+
+filesync(_Name, SyncAsync, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ Result = file_ctrl_filesync(SyncAsync, FileCtrlPid),
{Result,State}.
-write(_Name, async, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
- ok = file_write_async(FileCtrlPid, Bin),
- {ok,State};
-write(_Name, sync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
- Result = file_write_sync(FileCtrlPid, Bin),
+write(_Name, SyncAsync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ Result = file_write(SyncAsync, FileCtrlPid, Bin),
{Result,State}.
reset_state(_Name, State) ->
State.
-handle_info(_Name, {'EXIT',Pid,Why}, #{type := FileInfo, file_ctrl_pid := Pid}) ->
+handle_info(_Name, {'EXIT',Pid,Why}, #{file_ctrl_pid := Pid}=State) ->
%% file_ctrl_pid died, file error, terminate handler
- exit({error,{write_failed,FileInfo,Why}});
+ exit({error,{write_failed,maps:with([type,file,modes],State),Why}});
handle_info(_, _, State) ->
State.
@@ -211,27 +295,33 @@ terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
%%%-----------------------------------------------------------------
%%%
-open_log_file(HandlerName, FileInfo) ->
- case file_ctrl_start(HandlerName, FileInfo) of
- OK = {ok,_FileCtrlPid} -> OK;
- Error -> Error
- end.
-
-do_open_log_file({file,FileName}) ->
- do_open_log_file({file,FileName,[raw,append,delayed_write]});
-
-do_open_log_file({file,FileName,[]}) ->
- do_open_log_file({file,FileName,[raw,append,delayed_write]});
-
-do_open_log_file({file,FileName,Modes}) ->
+open_log_file(HandlerName,#{type:=file,
+ file:=FileName,
+ modes:=Modes,
+ file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}) ->
try
case filelib:ensure_dir(FileName) of
ok ->
case file:open(FileName, Modes) of
{ok, Fd} ->
{ok,#file_info{inode=INode}} =
- file:read_file_info(FileName),
- {ok, {Fd, INode}};
+ file:read_file_info(FileName,[raw]),
+ UpdateModes = [append | Modes--[write,append,exclusive]],
+ State0 = #{handler_name=>HandlerName,
+ file_name=>FileName,
+ modes=>UpdateModes,
+ file_check=>FileCheck,
+ fd=>Fd,
+ inode=>INode,
+ last_check=>timestamp(),
+ synced=>false,
+ write_res=>ok,
+ sync_res=>ok},
+ State = update_rotation({Size,Count,Compress},State0),
+ {ok,State};
Error ->
Error
end;
@@ -242,21 +332,23 @@ do_open_log_file({file,FileName,Modes}) ->
_:Reason -> {error,Reason}
end.
-close_log_file(Std) when Std == standard_io; Std == standard_error ->
- ok;
-close_log_file({Fd,_}) ->
+close_log_file(#{fd:=Fd}) ->
_ = file:datasync(Fd),
- _ = file:close(Fd).
+ _ = file:close(Fd),
+ ok;
+close_log_file(_) ->
+ ok.
+
%%%-----------------------------------------------------------------
%%% File control process
-file_ctrl_start(HandlerName, FileInfo) ->
+file_ctrl_start(HandlerName, HConfig) ->
Starter = self(),
FileCtrlPid =
spawn_link(fun() ->
- file_ctrl_init(HandlerName, FileInfo, Starter)
+ file_ctrl_init(HandlerName, HConfig, Starter)
end),
receive
{FileCtrlPid,ok} ->
@@ -271,18 +363,16 @@ file_ctrl_start(HandlerName, FileInfo) ->
file_ctrl_stop(Pid) ->
Pid ! stop.
-file_write_async(Pid, Bin) ->
+file_write(async, Pid, Bin) ->
Pid ! {log,Bin},
- ok.
-
-file_write_sync(Pid, Bin) ->
+ ok;
+file_write(sync, Pid, Bin) ->
file_ctrl_call(Pid, {log,Bin}).
-file_ctrl_filesync_async(Pid) ->
+file_ctrl_filesync(async, Pid) ->
Pid ! filesync,
- ok.
-
-file_ctrl_filesync_sync(Pid) ->
+ ok;
+file_ctrl_filesync(sync, Pid) ->
file_ctrl_call(Pid, filesync).
file_ctrl_call(Pid, Msg) ->
@@ -299,98 +389,255 @@ file_ctrl_call(Pid, Msg) ->
{error,{no_response,Pid}}
end.
-file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
+file_ctrl_init(HandlerName,
+ #{type:=file,
+ file:=FileName} = HConfig,
+ Starter) ->
process_flag(message_queue_data, off_heap),
- FileName = element(2, FileInfo),
- case do_open_log_file(FileInfo) of
- {ok,File} ->
+ case open_log_file(HandlerName,HConfig) of
+ {ok,State} ->
Starter ! {self(),ok},
- file_ctrl_loop(File, FileName, false, ok, ok, HandlerName);
+ file_ctrl_loop(State);
{error,Reason} ->
Starter ! {self(),{error,{open_failed,FileName,Reason}}}
end;
-file_ctrl_init(HandlerName, StdDev, Starter) ->
+file_ctrl_init(HandlerName, #{type:=StdDev}, Starter) ->
Starter ! {self(),ok},
- file_ctrl_loop(StdDev, StdDev, false, ok, ok, HandlerName).
+ file_ctrl_loop(#{handler_name=>HandlerName,dev=>StdDev}).
-file_ctrl_loop(File, DevName, Synced,
- PrevWriteResult, PrevSyncResult, HandlerName) ->
+file_ctrl_loop(State) ->
receive
%% asynchronous event
{log,Bin} ->
- File1 = ensure(File, DevName),
- Result = write_to_dev(File1, Bin, DevName,
- PrevWriteResult, HandlerName),
- file_ctrl_loop(File1, DevName, false,
- Result, PrevSyncResult, HandlerName);
+ State1 = write_to_dev(Bin,State),
+ file_ctrl_loop(State1);
%% synchronous event
{{log,Bin},{From,MRef}} ->
- File1 = ensure(File, DevName),
- Result = write_to_dev(File1, Bin, DevName,
- PrevWriteResult, HandlerName),
+ State1 = ensure_file(State),
+ State2 = write_to_dev(Bin,State1),
From ! {MRef,ok},
- file_ctrl_loop(File1, DevName, false,
- Result, PrevSyncResult, HandlerName);
+ file_ctrl_loop(State2);
filesync ->
- File1 = ensure(File, DevName),
- Result = sync_dev(File1, DevName, Synced,
- PrevSyncResult, HandlerName),
- file_ctrl_loop(File1, DevName, true,
- PrevWriteResult, Result, HandlerName);
+ State1 = sync_dev(State),
+ file_ctrl_loop(State1);
{filesync,{From,MRef}} ->
- File1 = ensure(File, DevName),
- Result = sync_dev(File1, DevName, Synced,
- PrevSyncResult, HandlerName),
+ State1 = ensure_file(State),
+ State2 = sync_dev(State1),
From ! {MRef,ok},
- file_ctrl_loop(File1, DevName, true,
- PrevWriteResult, Result, HandlerName);
+ file_ctrl_loop(State2);
+
+ {update_config,#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}} ->
+ State1 = update_rotation({Size,Count,Compress},State),
+ file_ctrl_loop(State1#{file_check=>FileCheck});
stop ->
- _ = close_log_file(File),
+ close_log_file(State),
stopped
end.
+%% file_check =:= 0 means the file is verified before every write
+maybe_ensure_file(#{file_check:=0}=State) ->
+    ensure_file(State);
+maybe_ensure_file(#{last_check:=T0,file_check:=CheckInt}=State)
+ when is_integer(CheckInt) ->
+ T = timestamp(),
+ if T-T0 > CheckInt -> ensure_file(State);
+ true -> State
+ end;
+maybe_ensure_file(State) ->
+ State.
+
%% In order to play well with tools like logrotate, we need to be able
%% to re-create the file if it has disappeared (e.g. if rotated by
%% logrotate)
-ensure(Fd,DevName) when is_atom(DevName) ->
- Fd;
-ensure({Fd,INode},FileName) ->
- case file:read_file_info(FileName) of
- {ok,#file_info{inode=INode}} ->
- {Fd,INode};
+ensure_file(#{fd:=Fd0,inode:=INode0,file_name:=FileName,modes:=Modes}=State) ->
+ case file:read_file_info(FileName,[raw]) of
+ {ok,#file_info{inode=INode0}} ->
+ State#{last_check=>timestamp()};
_ ->
- _ = file:close(Fd),
- _ = file:close(Fd), % delayed_write cause close not to close
- case do_open_log_file({file,FileName}) of
- {ok,File} ->
- File;
+ close_log_file(Fd0),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} =
+ file:read_file_info(FileName,[raw]),
+ State#{fd=>Fd,inode=>INode,
+ last_check=>timestamp(),
+ synced=>true,sync_res=>ok};
Error ->
exit({could_not_reopen_file,Error})
end
- end.
+ end;
+ensure_file(State) ->
+ State.
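A minimal sketch of the case the inode comparison guards against (file names illustrative): an external tool moves the log aside, the recorded inode no longer matches, and the next write reopens FileName with the stored modes:

    {ok,_Before} = file:read_file_info("default.log",[raw]),
    ok = file:rename("default.log","default.log-old"),          % e.g. done by logrotate
    {error,enoent} = file:read_file_info("default.log",[raw]).  % ensure_file/1 reopens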
-write_to_dev(DevName, Bin, _DevName, _PrevWriteResult, _HandlerName)
- when is_atom(DevName) ->
- io:put_chars(DevName, Bin);
-write_to_dev({Fd,_}, Bin, FileName, PrevWriteResult, HandlerName) ->
+write_to_dev(Bin,#{dev:=DevName}=State) ->
+ io:put_chars(DevName, Bin),
+ State;
+write_to_dev(Bin, State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
Result = ?file_write(Fd, Bin),
- maybe_notify_error(write,Result,PrevWriteResult,FileName,HandlerName).
+ State2 = maybe_rotate_file(Bin,State1),
+ maybe_notify_error(write,Result,State2),
+ State2#{synced=>false,write_res=>Result}.
-sync_dev(_, _FileName, true, PrevSyncResult, _HandlerName) ->
- PrevSyncResult;
-sync_dev({Fd,_}, FileName, false, PrevSyncResult, HandlerName) ->
+sync_dev(#{synced:=false}=State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
Result = ?file_datasync(Fd),
- maybe_notify_error(filesync,Result,PrevSyncResult,FileName,HandlerName).
+ maybe_notify_error(filesync,Result,State1),
+ State1#{synced=>true,sync_res=>Result};
+sync_dev(State) ->
+ State.
-maybe_notify_error(_Op, ok, _PrevResult, _FileName, _HandlerName) ->
+update_rotation({infinity,_,_},State) ->
+ maybe_remove_archives(0,State),
+ maps:remove(rotation,State);
+update_rotation({Size,Count,Compress},#{file_name:=FileName} = State) ->
+ maybe_remove_archives(Count,State),
+ {ok,#file_info{size=CurrSize}} = file:read_file_info(FileName,[raw]),
+ State1 = State#{rotation=>#{size=>Size,
+ count=>Count,
+ compress=>Compress,
+ curr_size=>CurrSize}},
+ maybe_update_compress(0,State1),
+ maybe_rotate_file(0,State1).
+
+maybe_remove_archives(Count,#{file_name:=FileName}=State) ->
+ Archive = rot_file_name(FileName,Count,false),
+ CompressedArchive = rot_file_name(FileName,Count,true),
+ case {file:read_file_info(Archive,[raw]),
+ file:read_file_info(CompressedArchive,[raw])} of
+ {{error,enoent},{error,enoent}} ->
+ ok;
+ _ ->
+ _ = file:delete(Archive),
+ _ = file:delete(CompressedArchive),
+ maybe_remove_archives(Count+1,State)
+ end.
+
+maybe_update_compress(Count,#{rotation:=#{count:=Count}}) ->
+ ok;
+maybe_update_compress(N,#{file_name:=FileName,
+ rotation:=#{compress:=Compress}}=State) ->
+ Archive = rot_file_name(FileName,N,not Compress),
+ case file:read_file_info(Archive,[raw]) of
+ {ok,_} when Compress ->
+ compress_file(Archive);
+ {ok,_} ->
+ decompress_file(Archive);
+ _ ->
+ ok
+ end,
+ maybe_update_compress(N+1,State).
+
+maybe_rotate_file(Bin,#{rotation:=_}=State) when is_binary(Bin) ->
+ maybe_rotate_file(byte_size(Bin),State);
+maybe_rotate_file(AddSize,#{rotation:=#{size:=RotSize,
+ curr_size:=CurrSize}=Rotation}=State) ->
+ NewSize = CurrSize + AddSize,
+ if NewSize>RotSize ->
+ rotate_file(State#{rotation=>Rotation#{curr_size=>NewSize}});
+ true ->
+ State#{rotation=>Rotation#{curr_size=>NewSize}}
+ end;
+maybe_rotate_file(_Bin,State) ->
+ State.
+
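For orientation: with max_no_bytes set to 1000 and 20-byte log events, as in the rotate_size test further down, the 50th event brings curr_size to exactly 1000, which is not strictly greater than RotSize, so nothing is rotated; the 51st event makes NewSize 1020 and triggers rotate_file/1, leaving a 1020-byte archive and an empty current log.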
+rotate_file(#{fd:=Fd0,file_name:=FileName,modes:=Modes,rotation:=Rotation}=State) ->
+ State1 = sync_dev(State),
+    _ = file:close(Fd0),
+    _ = file:close(Fd0), % with delayed_write, the first close may not close the file
+ rotate_files(FileName,maps:get(count,Rotation),maps:get(compress,Rotation)),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} = file:read_file_info(FileName,[raw]),
+ State1#{fd=>Fd,inode=>INode,rotation=>Rotation#{curr_size=>0}};
+ Error ->
+ exit({could_not_reopen_file,Error})
+ end.
+
+rotate_files(FileName,0,_Compress) ->
+ _ = file:delete(FileName),
+ ok;
+rotate_files(FileName,1,Compress) ->
+ FileName0 = FileName++".0",
+ _ = file:rename(FileName,FileName0),
+ if Compress -> compress_file(FileName0);
+ true -> ok
+ end,
ok;
-maybe_notify_error(_Op, PrevResult, PrevResult, _FileName, _HandlerName) ->
+rotate_files(FileName,Count,Compress) ->
+ _ = file:rename(rot_file_name(FileName,Count-2,Compress),
+ rot_file_name(FileName,Count-1,Compress)),
+ rotate_files(FileName,Count-1,Compress).
+
+rot_file_name(FileName,Count,false) ->
+ FileName ++ "." ++ integer_to_list(Count);
+rot_file_name(FileName,Count,true) ->
+ rot_file_name(FileName,Count,false) ++ ".gz".
+
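The archive names this yields, for illustration (base name illustrative):

    "my.log.0"    = rot_file_name("my.log",0,false),
    "my.log.1.gz" = rot_file_name("my.log",1,true).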
+compress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(FileName++".gz",[write]),
+ Z = zlib:open(),
+    zlib:deflateInit(Z, default, deflated, 31, 8, default), % windowBits 31 (15+16) = gzip format
+ compress_data(Z,In,Out),
+ zlib:deflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+compress_data(Z,In,Out) ->
+ case file:read(In,100000) of
+ {ok,Data} ->
+ Compressed = zlib:deflate(Z, Data),
+ _ = file:write(Out,Compressed),
+ compress_data(Z,In,Out);
+ eof ->
+ Compressed = zlib:deflate(Z, <<>>, finish),
+ _ = file:write(Out,Compressed),
+ ok
+ end.
+
+decompress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(filename:rootname(FileName,".gz"),[write]),
+ Z = zlib:open(),
+    zlib:inflateInit(Z, 31), % windowBits 31 = expect gzip format
+ decompress_data(Z,In,Out),
+ zlib:inflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+decompress_data(Z,In,Out) ->
+ case file:read(In,1000) of
+ {ok,Data} ->
+ Decompressed = zlib:inflate(Z, Data),
+ _ = file:write(Out,Decompressed),
+ decompress_data(Z,In,Out);
+ eof ->
+ ok
+ end.
+
+maybe_notify_error(_Op, ok, _State) ->
+ ok;
+maybe_notify_error(Op, Result, #{write_res:=WR,sync_res:=SR})
+ when (Op==write andalso Result==WR) orelse
+ (Op==filesync andalso Result==SR) ->
%% don't report same error twice
- PrevResult;
-maybe_notify_error(Op, Error, _PrevResult, FileName, HandlerName) ->
+ ok;
+maybe_notify_error(Op, Error, #{handler_name:=HandlerName,file_name:=FileName}) ->
logger_h_common:error_notify({HandlerName,Op,FileName,Error}),
- Error.
+ ok.
+
+timestamp() ->
+    erlang:monotonic_time(millisecond). % same unit as file_check
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 9207025a2c..57c44c2498 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -21,7 +21,8 @@
-include_lib("common_test/include/ct.hrl").
--export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2, sync/1]).
+-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2,
+ start_distribution_false/1, sync/1]).
-export([init_per_suite/1, end_per_suite/1]).
@@ -30,7 +31,7 @@ suite() ->
{timetrap,{minutes,2}}].
all() ->
- [sync].
+ [sync, start_distribution_false].
groups() ->
[].
@@ -59,12 +60,9 @@ from(H, [H | T]) -> T;
from(H, [_ | T]) -> from(H, T);
from(_, []) -> [].
-%%-----------------------------------------------------------------
-%% Test suite for sync_nodes. This is quite tricky.
-%%
+%% Test sync_nodes. This is quite tricky.
%% Should be started in a CC view with:
 %% erl -sname XXX where XXX is not in [cp1, cp2]
-%%-----------------------------------------------------------------
sync(Conf) when is_list(Conf) ->
%% Write a config file
Dir = proplists:get_value(priv_dir,Conf),
@@ -106,9 +104,18 @@ wait_for_node(Node) ->
_Other -> wait_for_node(Node)
end.
-
stop_node(Node) ->
M = list_to_atom(lists:concat([Node,
[$@],
from($@,atom_to_list(node()))])),
rpc:cast(M, erlang, halt, []).
+
+start_distribution_false(Config) when is_list(Config) ->
+ %% When distribution is disabled, -sname/-name has no effect
+ Str = os:cmd(ct:get_progname()
+ ++ " -kernel start_distribution false"
+ ++ " -sname no_distribution"
+ ++ " -eval \"erlang:display(node())\""
+ ++ " -noshell -s erlang halt"),
+ "'nonode@nohost'" ++ _ = Str,
+ ok.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index 70bb775db8..035e5d8974 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -1048,8 +1048,11 @@ kernel_config(Config) ->
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+ [append,delayed_write,raw] = lists:sort(Modes),
+
%% Same, but using 'logger' parameter instead of 'error_logger'
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
@@ -1060,26 +1063,27 @@ kernel_config(Config) ->
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
%% Same, but with type={file,File,Modes}
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
- M = [raw,write,delayed_write],
+ M = [raw,write],
ok = rpc:call(Node,application,set_env,[kernel,logger,
[{handler,default,logger_std_h,
#{config=>#{type=>{file,F,M}}}}]]),
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F,M}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=[delayed_write|M]}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
%% Same, but with disk_log handler
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
- M = [raw,write,delayed_write],
ok = rpc:call(Node,application,set_env,[kernel,logger,
[{handler,default,logger_disk_log_h,
#{config=>#{file=>F}}}]]),
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index b2c2c8ba67..0c5516f82b 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -112,6 +112,8 @@ all() ->
add_remove_instance_standard_error,
add_remove_instance_file1,
add_remove_instance_file2,
+ add_remove_instance_file3,
+ add_remove_instance_file4,
default_formatter,
filter_config,
errors,
@@ -142,7 +144,12 @@ all() ->
restart_after,
handler_requests_under_load,
recreate_deleted_log,
- reopen_changed_log
+ reopen_changed_log,
+ rotate_size,
+ rotate_size_compressed,
+ rotate_size_reopen,
+ rotation_opts,
+ rotation_opts_restart_handler
].
add_remove_instance_tty(_Config) ->
@@ -179,10 +186,27 @@ add_remove_instance_file2(Config) ->
add_remove_instance_file2(cleanup,_Config) ->
logger_std_h_remove().
-add_remove_instance_file(Log, Type) ->
+add_remove_instance_file3(_Config) ->
+ Log = atom_to_list(?MODULE),
+ StdHConfig = #{type=>file},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file3(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file4(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog4.txt"),
+ StdHConfig = #{file=>Log,modes=>[]},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file4(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file(Log, Type) when not is_map(Type) ->
+ add_remove_instance_file(Log,#{type=>Type});
+add_remove_instance_file(Log, StdHConfig) when is_map(StdHConfig) ->
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => StdHConfig,
filter_default=>stop,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
@@ -257,9 +281,10 @@ errors(Config) ->
end,
{error,
- {handler_not_added,{open_failed,Log,_}}} =
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{modes:=bad_file_opt}}}} =
logger:add_handler(myh3,logger_std_h,
- #{config=>#{type=>{file,Log,[bad_file_opt]}}}),
+ #{config=>#{type=>{file,Log,bad_file_opt}}}),
ok = logger:notice(?msg).
@@ -607,24 +632,51 @@ reconfig(cleanup, _Config) ->
file_opts(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
- BadFileOpts = [raw],
- BadType = {file,Log,BadFileOpts},
- {error,{handler_not_added,{open_failed,Log,enoent}}} =
- logger:add_handler(?MODULE, logger_std_h,
- #{config => #{type => BadType}}),
+ MissingOpts = [raw],
+ Type1 = {file,Log,MissingOpts},
+ ok = logger:add_handler(?MODULE, logger_std_h,
+ #{config => #{type => Type1}}),
+ {ok,#{config:=#{type:=file,file:=Log,modes:=Modes1}}} =
+ logger:get_handler_config(?MODULE),
+ [append,delayed_write,raw] = lists:sort(Modes1),
+ ok = logger:remove_handler(?MODULE),
OkFileOpts = [raw,append],
OkType = {file,Log,OkFileOpts},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => OkType},
+ #{config => #{type => OkType}, % old format
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{cb_state := #{handler_state := #{type := OkType}}} =
+ ModOpts = [delayed_write|OkFileOpts],
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
logger_olp:info(h_proc_name()),
- {ok,#{config := #{type := OkType}}} = logger:get_handler_config(?MODULE),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => file,
+ file => Log,
+ modes => OkFileOpts}, % new format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger:get_handler_config(?MODULE),
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
@@ -640,13 +692,14 @@ sync(Config) ->
Type = {file,Log},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => #{type => Type,
+ file_check => 10000},
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
%% check repeated filesync happens
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"first\n">>},
{file,datasync}]),
@@ -656,7 +709,7 @@ sync(Config) ->
check_tracer(filesync_rep_int()*2),
%% check that explicit filesync is only done once
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"second\n">>},
{file,datasync},
@@ -675,7 +728,7 @@ sync(Config) ->
#{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval,
maps:get(cb_state, logger_olp:info(h_proc_name()))),
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"third\n">>},
{file,datasync},
@@ -1285,6 +1338,331 @@ reopen_changed_log(Config) ->
reopen_changed_log(cleanup, _Config) ->
ok = stop_handler(?MODULE).
+rotate_size(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1005}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ ok.
+rotate_size(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_compressed(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ #{config=>#{max_no_bytes=>1000,
+ max_no_files=>2,
+ compress_on_rotate=>true}}),
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".0.gz"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".1.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=38}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ ok.
+rotate_size_compressed(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_reopen(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ {ok,HConfig} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(?MODULE,maps:get(module,HConfig),HConfig),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=580}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ ok.
+rotate_size_reopen(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts(Config) ->
+ {Log,_HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ #{max_no_bytes:=infinity,
+ max_no_files:=0,
+ compress_on_rotate:=false} = StdHConfig,
+
+ %% Test bad rotation config
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_bytes=>0}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_files=>infinity}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{compress_on_rotate=>undefined}),
+
+
+ %% Test good rotation config - start with no rotation
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=200}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Turn on rotation, check that existing file is rotated since its
+ %% size exceeds max_no_bytes
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0 = Log++".0",
+ {ok,#file_info{size=200}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count and check that nothing changes with existing files
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>200,
+ max_no_files=>3}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Add more log events and see that extended size and count works
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".2"),
+ [_,_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and check that archive files that exceed the new
+    %% count are removed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count again, and turn on compression. Check
+ %% that archives are compressed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2,
+ compress_on_rotate=>true}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0gz = Log0++".gz",
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ [Log0gz] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".1.gz"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+    %% Reduce count and turn off compression. Check that archives that
+    %% exceed the new count are removed, and the rest are
+    %% decompressed.
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1,
+ compress_on_rotate=>false}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Check that config and handler state agree on the current rotation settings
+ {ok,#{config:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger:get_handler_config(?MODULE),
+ #{cb_state:=#{handler_state:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger_olp:info(h_proc_name()),
+ ok.
+rotation_opts(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts_restart_handler(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+
+ %% Fill all logs
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,15)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn off rotation. Check that archives are removed.
+ {ok,#{config:=StdHConfig1}=HConfig1} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig1#{config=>StdHConfig1#{max_no_bytes=>infinity}}),
+ timer:sleep(100),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Add some log events and check that file is no longer rotated.
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=260}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn on rotation. Check that the file is rotated.
+ {ok,#{config:=StdHConfig2}=HConfig2} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig2#{config=>StdHConfig2#{max_no_bytes=>100,
+ max_no_files=>2}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=80}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".1"),
+
+ %% Stop/start handler, reduce count and turn on compression. Check
+ %% that excess archives are removed, and the rest compressed.
+ {ok,#{config:=StdHConfig3}=HConfig3} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig3#{config=>StdHConfig3#{max_no_bytes=>75,
+ max_no_files=>1,
+ compress_on_rotate=>true}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".0.gz"),
+ [_] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn off compression. Check that archives
+ %% are decompressed.
+ {ok,#{config:=StdHConfig4}=HConfig4} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig4#{config=>StdHConfig4#{compress_on_rotate=>false}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=80}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ ok.
+rotation_opts_restart_handler(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
%%%-----------------------------------------------------------------
%%%
send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
@@ -1305,8 +1683,8 @@ start_handler(Name, TTY, _Config) when TTY == standard_io;
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => TTY},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{HConfig,StdHConfig};
@@ -1320,12 +1698,17 @@ start_handler(Name, FuncName, Config) ->
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => Type},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{Log,HConfig,StdHConfig}.
+
+filter_only_this_domain(Name) ->
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,[Name]}}}].
+
stop_handler(Name) ->
R = logger:remove_handler(Name),
ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
@@ -1658,7 +2041,7 @@ tpl([]) ->
tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
-tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[Data,_]}},
{Pid,[{Mod,Func,Data}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
@@ -1742,4 +2125,3 @@ filesync_rep_int() ->
file_delete(Log) ->
file:delete(Log).
-
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 422c4c94ae..e5b01cce5f 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -2736,6 +2736,11 @@ ssl_options_list([Key | Keys], [Value | Values], Acc) ->
handle_active_option(false, connection = StateName, To, Reply, State) ->
hibernate_after(StateName, State, [{reply, To, Reply}]);
+handle_active_option(_, connection = StateName, To, _Reply, #state{connection_env = #connection_env{terminated = true},
+ user_data_buffer = {_,0,_}} = State) ->
+ handle_normal_shutdown(?ALERT_REC(?FATAL, ?CLOSE_NOTIFY, all_data_deliverd), StateName,
+ State#state{start_or_recv_from = To}),
+ {stop,{shutdown, peer_close}, State};
handle_active_option(_, connection = StateName0, To, Reply, #state{static_env = #static_env{protocol_cb = Connection},
user_data_buffer = {_,0,_}} = State0) ->
case Connection:next_event(StateName0, no_record, State0) of
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index 22c77aa172..7703198c4c 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -59,6 +59,7 @@
-export([otp_5340/1]).
-export([otp_6338/1]).
-export([otp_6842_select_1000/1]).
+-export([select_mbuf_trapping/1]).
-export([otp_7665/1]).
-export([meta_wb/1]).
-export([grow_shrink/1, grow_pseudo_deleted/1, shrink_pseudo_deleted/1]).
@@ -132,6 +133,7 @@ all() ->
t_named_select,
select_fail, t_insert_new, t_repair_continuation,
otp_5340, otp_6338, otp_6842_select_1000, otp_7665,
+ select_mbuf_trapping,
otp_8732, meta_wb, grow_shrink, grow_pseudo_deleted,
shrink_pseudo_deleted, {group, meta_smp}, smp_insert,
smp_fixed_delete, smp_unfix_fix, smp_select_replace,
@@ -5292,6 +5294,61 @@ otp_6338(Config) when is_list(Config) ->
end),
ok.
+%% OTP-15660: Verify that select does not trap excessively
+%% when the process has mbuf heap fragments.
+select_mbuf_trapping(Config) when is_list(Config) ->
+ select_mbuf_trapping_do(set),
+ select_mbuf_trapping_do(ordered_set).
+
+select_mbuf_trapping_do(Type) ->
+ T = ets:new(xxx, [Type]),
+ NKeys = 50,
+ [ets:insert(T, {K, value}) || K <- lists:seq(1,NKeys)],
+
+ {priority, Prio} = process_info(self(), priority),
+ Tracee = self(),
+ [SchedTracer]
+ = start_loopers(1, Prio,
+ fun (SC) ->
+ receive
+ {trace, Tracee, out, _} ->
+ SC+1;
+ done ->
+ Tracee ! {schedule_count, SC},
+ exit(normal)
+ end
+ end,
+ 0),
+
+ erlang:garbage_collect(),
+ 1 = erlang:trace(self(), true, [running,{tracer,SchedTracer}]),
+
+ %% Artificially create an mbuf heap fragment
+ MbufTerm = "Frag me up",
+ MbufTerm = erts_debug:set_internal_state(mbuf, MbufTerm),
+
+ Keys = ets:select(T, [{{'$1', value}, [], ['$1']}]),
+ NKeys = length(Keys),
+
+ 1 = erlang:trace(self(), false, [running]),
+ Ref = erlang:trace_delivered(Tracee),
+ receive
+ {trace_delivered, Tracee, Ref} ->
+ SchedTracer ! done
+ end,
+ receive
+ {schedule_count, N} ->
+ io:format("~p context switches: ~p", [Type,N]),
+ if
+ N < 3 -> ok;
+ true -> ct:fail(failed)
+ end
+ end,
+ true = ets:delete(T),
+ ok.
+
+
+
%% Elements could come in the wrong order in a bag if a rehash occurred.
otp_5340(Config) when is_list(Config) ->
repeat_for_opts(fun otp_5340_do/1).