path: root/erts/emulator/beam
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/arith_instrs.tab | 189
-rw-r--r--  erts/emulator/beam/atom.names | 3
-rw-r--r--  erts/emulator/beam/beam_debug.c | 52
-rw-r--r--  erts/emulator/beam/beam_emu.c | 217
-rw-r--r--  erts/emulator/beam/beam_load.c | 624
-rw-r--r--  erts/emulator/beam/bif.c | 366
-rw-r--r--  erts/emulator/beam/bif.tab | 30
-rw-r--r--  erts/emulator/beam/bif_instrs.tab | 337
-rw-r--r--  erts/emulator/beam/big.h | 2
-rw-r--r--  erts/emulator/beam/break.c | 58
-rw-r--r--  erts/emulator/beam/bs_instrs.tab | 649
-rw-r--r--  erts/emulator/beam/copy.c | 6
-rw-r--r--  erts/emulator/beam/dist.c | 1788
-rw-r--r--  erts/emulator/beam/dist.h | 275
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 263
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 33
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 30
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 970
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 90
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 129
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.h | 7
-rw-r--r--  erts/emulator/beam/erl_arith.c | 970
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 401
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 540
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 94
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 315
-rw-r--r--  erts/emulator/beam/erl_bif_persistent.c | 530
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 64
-rw-r--r--  erts/emulator/beam/erl_binary.h | 12
-rw-r--r--  erts/emulator/beam/erl_bits.c | 52
-rw-r--r--  erts/emulator/beam/erl_bits.h | 13
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 41
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.h | 22
-rw-r--r--  erts/emulator/beam/erl_db.c | 331
-rw-r--r--  erts/emulator/beam/erl_db.h | 42
-rw-r--r--  erts/emulator/beam/erl_db_catree.c | 2332
-rw-r--r--  erts/emulator/beam/erl_db_catree.h | 137
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 507
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 1807
-rw-r--r--  erts/emulator/beam/erl_db_tree_util.h | 174
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 34
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 95
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 3
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 4
-rw-r--r--  erts/emulator/beam/erl_flxctr.c | 370
-rw-r--r--  erts/emulator/beam/erl_flxctr.h | 406
-rw-r--r--  erts/emulator/beam/erl_gc.c | 48
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 39
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h | 2
-rw-r--r--  erts/emulator/beam/erl_init.c | 108
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 100
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 4
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 2
-rw-r--r--  erts/emulator/beam/erl_lock_flags.h | 6
-rw-r--r--  erts/emulator/beam/erl_map.c | 74
-rw-r--r--  erts/emulator/beam/erl_message.c | 233
-rw-r--r--  erts/emulator/beam/erl_message.h | 37
-rw-r--r--  erts/emulator/beam/erl_monitor_link.c | 131
-rw-r--r--  erts/emulator/beam/erl_monitor_link.h | 230
-rw-r--r--  erts/emulator/beam/erl_nif.c | 181
-rw-r--r--  erts/emulator/beam/erl_nif.h | 38
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 19
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 243
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 88
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 2
-rw-r--r--  erts/emulator/beam/erl_posix_str.c | 3
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 4
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.c | 378
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.h | 88
-rw-r--r--  erts/emulator/beam/erl_process.c | 1127
-rw-r--r--  erts/emulator/beam/erl_process.h | 20
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 51
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 5
-rw-r--r--  erts/emulator/beam/erl_rbtree.h | 280
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 6
-rw-r--r--  erts/emulator/beam/erl_trace.c | 6
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 4
-rw-r--r--  erts/emulator/beam/erl_utils.h | 1
-rw-r--r--  erts/emulator/beam/erl_vm.h | 28
-rw-r--r--  erts/emulator/beam/external.c | 473
-rw-r--r--  erts/emulator/beam/external.h | 99
-rw-r--r--  erts/emulator/beam/global.h | 18
-rw-r--r--  erts/emulator/beam/instrs.tab | 184
-rw-r--r--  erts/emulator/beam/io.c | 85
-rw-r--r--  erts/emulator/beam/macros.tab | 15
-rw-r--r--  erts/emulator/beam/map_instrs.tab | 28
-rw-r--r--  erts/emulator/beam/msg_instrs.tab | 4
-rw-r--r--  erts/emulator/beam/ops.tab | 702
-rw-r--r--  erts/emulator/beam/sys.h | 25
-rw-r--r--  erts/emulator/beam/time.c | 9
-rw-r--r--  erts/emulator/beam/utils.c | 15
95 files changed, 13942 insertions(+), 6723 deletions(-)
diff --git a/erts/emulator/beam/arith_instrs.tab b/erts/emulator/beam/arith_instrs.tab
index b828e86788..f14b376419 100644
--- a/erts/emulator/beam/arith_instrs.tab
+++ b/erts/emulator/beam/arith_instrs.tab
@@ -19,21 +19,22 @@
// %CopyrightEnd%
//
-OUTLINED_ARITH_2(Fail, Live, Name, BIF, Op1, Op2, Dst) {
+OUTLINED_ARITH_2(Fail, Name, BIF, Op1, Op2, Dst) {
Eterm result;
- Uint live = $Live;
- HEAVY_SWAPOUT;
- reg[live] = $Op1;
- reg[live+1] = $Op2;
- result = erts_gc_$Name (c_p, reg, live);
- HEAVY_SWAPIN;
+#ifdef DEBUG
+ Eterm* orig_htop = HTOP;
+ Eterm* orig_stop = E;
+#endif
+ DEBUG_SWAPOUT;
+ result = erts_$Name (c_p, $Op1, $Op2);
+ DEBUG_SWAPIN;
+ ASSERT(orig_htop == HTOP && orig_stop == E);
ERTS_HOLE_CHECK(c_p);
if (ERTS_LIKELY(is_value(result))) {
- $REFRESH_GEN_DEST();
$Dst = result;
$NEXT0();
}
- $BIF_ERROR_ARITY_2($Fail, $BIF, reg[live], reg[live+1]);
+ $BIF_ERROR_ARITY_2($Fail, $BIF, $Op1, $Op2);
}
@@ -48,15 +49,54 @@ plus.fetch(Op1, Op2) {
PlusOp2 = $Op2;
}
-plus.execute(Fail, Live, Dst) {
+plus.execute(Fail, Dst) {
if (ERTS_LIKELY(is_both_small(PlusOp1, PlusOp2))) {
+#ifdef HAVE_OVERFLOW_CHECK_BUILTINS
+ Sint lhs_tagged, rhs_untagged, res;
+
+ /* The value part of immediate integers start right after the tag and
+ * occupy the rest of the word, so if you squint a bit they look like
+ * fixed-point integers; as long as you mask the tag away you will get
+ * correct results from addition/subtraction since they share the same
+ * notion of zero. It's fairly easy to see that the following holds
+ * when (a + b) is in range:
+ *
+ * (a >> s) + (b >> s) == ((a & ~m) + (b & ~m)) >> s
+ *
+ * Where 's' is the tag size and 'm' is the tag mask.
+ *
+ * The left-hand side is our fallback in the #else clause and is the
+ * fastest way to do this safely in plain C. The actual addition will
+ * never overflow since `Sint` has a much greater range than our
+ * smalls, so we can use the IS_SSMALL macro to see if the result is
+ * within range.
+ *
+ * What we're doing below is an extension of the right-hand side. By
+ * treating `a` and `b` as fixed-point integers, all additions whose
+ * result is out of range will also overflow `Sint` and we can use the
+ * compiler's overflow intrinsics to check for this condition.
+ *
+ * In addition, since the tag lives in the lowest bits we can further
+ * optimize this by only stripping the tag from one side. The higher
+ * bits can't influence the tag bits since we bail on overflow, so the
+ * tag bits from the tagged side will simply appear in the result. */
+ lhs_tagged = PlusOp1;
+ rhs_untagged = PlusOp2 & ~_TAG_IMMED1_MASK;
+
+ if (ERTS_LIKELY(!__builtin_add_overflow(lhs_tagged, rhs_untagged, &res))) {
+ ASSERT(is_small(res));
+ $Dst = res;
+ $NEXT0();
+ }
+#else
Sint i = signed_val(PlusOp1) + signed_val(PlusOp2);
if (ERTS_LIKELY(IS_SSMALL(i))) {
$Dst = make_small(i);
$NEXT0();
}
+#endif
}
- $OUTLINED_ARITH_2($Fail, $Live, mixed_plus, BIF_splus_2, PlusOp1, PlusOp2, $Dst);
+ $OUTLINED_ARITH_2($Fail, mixed_plus, BIF_splus_2, PlusOp1, PlusOp2, $Dst);
}
i_minus := minus.fetch.execute;
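As an editorial aside, the overflow-intrinsic trick described in the comment above can be checked in isolation. Below is a minimal standalone sketch; the 4-bit tag layout (TAG_SIZE/TAG_MASK/SMALL_TAG) is a hypothetical stand-in for the real _TAG_IMMED1_SIZE/_TAG_IMMED1_MASK/_TAG_IMMED1_SMALL constants in erl_term.h, and __builtin_add_overflow is the GCC/Clang builtin guarded by HAVE_OVERFLOW_CHECK_BUILTINS in the patch.

```c
/* Sketch of the tagged small-integer addition from plus.execute. */
#include <assert.h>
#include <stdio.h>

#define TAG_SIZE  4                      /* hypothetical tag layout */
#define TAG_MASK  ((1L << TAG_SIZE) - 1)
#define SMALL_TAG 0xFL

static long make_small(long v) { return (v << TAG_SIZE) | SMALL_TAG; }
static long signed_val(long t) { return t >> TAG_SIZE; }

/* Returns 0 and stores the tagged sum on success; 1 when the result
 * would fall outside the small range. */
static int tagged_add(long lhs_tagged, long rhs_tagged, long *out)
{
    /* Strip the tag from one side only; its low bits become zero, so
     * the other side's tag survives the addition untouched. */
    long rhs_untagged = rhs_tagged & ~TAG_MASK;
    long res;

    /* Any sum outside the small range also overflows the machine
     * word, so the intrinsic doubles as the range check. */
    if (__builtin_add_overflow(lhs_tagged, rhs_untagged, &res))
        return 1;

    *out = res;
    return 0;
}

int main(void)
{
    long sum;
    assert(tagged_add(make_small(17), make_small(25), &sum) == 0);
    assert(signed_val(sum) == 42);
    printf("17 + 25 = %ld\n", signed_val(sum));
    return 0;
}
```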
@@ -70,15 +110,30 @@ minus.fetch(Op1, Op2) {
MinusOp2 = $Op2;
}
-minus.execute(Fail, Live, Dst) {
+minus.execute(Fail, Dst) {
if (ERTS_LIKELY(is_both_small(MinusOp1, MinusOp2))) {
+#ifdef HAVE_OVERFLOW_CHECK_BUILTINS
+ Sint lhs_tagged, rhs_untagged, res;
+
+ /* See plus.execute */
+ lhs_tagged = MinusOp1;
+ rhs_untagged = MinusOp2 & ~_TAG_IMMED1_MASK;
+
+ if (ERTS_LIKELY(!__builtin_sub_overflow(lhs_tagged, rhs_untagged, &res))) {
+ ASSERT(is_small(res));
+ $Dst = res;
+ $NEXT0();
+ }
+#else
Sint i = signed_val(MinusOp1) - signed_val(MinusOp2);
+
if (ERTS_LIKELY(IS_SSMALL(i))) {
$Dst = make_small(i);
$NEXT0();
}
+#endif
}
- $OUTLINED_ARITH_2($Fail, $Live, mixed_minus, BIF_sminus_2, MinusOp1, MinusOp2, $Dst);
+ $OUTLINED_ARITH_2($Fail, mixed_minus, BIF_sminus_2, MinusOp1, MinusOp2, $Dst);
}
i_increment := increment.fetch.execute;
@@ -91,27 +146,35 @@ increment.fetch(Src) {
increment_reg_val = $Src;
}
-increment.execute(IncrementVal, Live, Dst) {
+increment.execute(IncrementVal, Dst) {
Eterm increment_val = $IncrementVal;
- Uint live;
Eterm result;
if (ERTS_LIKELY(is_small(increment_reg_val))) {
+#ifdef HAVE_OVERFLOW_CHECK_BUILTINS
+ Sint lhs_tagged, rhs_untagged, res;
+
+ /* See plus.execute */
+ lhs_tagged = increment_reg_val;
+ rhs_untagged = (Sint)increment_val << _TAG_IMMED1_SIZE;
+
+ if (ERTS_LIKELY(!__builtin_add_overflow(lhs_tagged, rhs_untagged, &res))) {
+ ASSERT(is_small(res));
+ $Dst = res;
+ $NEXT0();
+ }
+#else
Sint i = signed_val(increment_reg_val) + increment_val;
if (ERTS_LIKELY(IS_SSMALL(i))) {
$Dst = make_small(i);
$NEXT0();
}
+#endif
}
- live = $Live;
- HEAVY_SWAPOUT;
- reg[live] = increment_reg_val;
- reg[live+1] = make_small(increment_val);
- result = erts_gc_mixed_plus(c_p, reg, live);
- HEAVY_SWAPIN;
+
+ result = erts_mixed_plus(c_p, increment_reg_val, make_small(increment_val));
ERTS_HOLE_CHECK(c_p);
if (ERTS_LIKELY(is_value(result))) {
- $REFRESH_GEN_DEST();
$Dst = result;
$NEXT0();
}
@@ -119,19 +182,34 @@ increment.execute(IncrementVal, Live, Dst) {
goto find_func_info;
}
-i_times(Fail, Live, Op1, Op2, Dst) {
+i_times(Fail, Op1, Op2, Dst) {
Eterm op1 = $Op1;
Eterm op2 = $Op2;
- $OUTLINED_ARITH_2($Fail, $Live, mixed_times, BIF_stimes_2, op1, op2, $Dst);
+#ifdef HAVE_OVERFLOW_CHECK_BUILTINS
+ if (ERTS_LIKELY(is_both_small(op1, op2))) {
+ /* See plus.execute */
+ Sint lhs_untagged, rhs_actual, res;
+
+ lhs_untagged = op1 & ~_TAG_IMMED1_MASK;
+ rhs_actual = signed_val(op2);
+
+ if (ERTS_LIKELY(!__builtin_mul_overflow(lhs_untagged, rhs_actual, &res))) {
+ ASSERT(!(res & _TAG_IMMED1_MASK));
+ $Dst = res | _TAG_IMMED1_SMALL;
+ $NEXT0();
+ }
+ }
+#endif
+ $OUTLINED_ARITH_2($Fail, mixed_times, BIF_stimes_2, op1, op2, $Dst);
}
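The multiplication case just above works slightly differently: one operand is fully untagged (leaving the value pre-shifted by the tag size) and the other is taken as its plain value, so the product lands with all-zero tag bits that the small tag can be ORed into. A sketch under the same hypothetical tag constants as the addition example, assuming arithmetic right shift for signed values (as signed_val effectively does):

```c
/* Sketch of the i_times tagged-multiplication trick. */
#include <assert.h>

#define TAG_SIZE  4                      /* hypothetical tag layout */
#define TAG_MASK  ((1L << TAG_SIZE) - 1)
#define SMALL_TAG 0xFL

static int tagged_mul(long a_tagged, long b_tagged, long *out)
{
    long lhs_untagged = a_tagged & ~TAG_MASK; /* v1 << TAG_SIZE */
    long rhs_actual   = b_tagged >> TAG_SIZE; /* plain v2       */
    long res;

    if (__builtin_mul_overflow(lhs_untagged, rhs_actual, &res))
        return 1;                   /* outside the small range */

    assert((res & TAG_MASK) == 0);  /* tag bits are clear      */
    *out = res | SMALL_TAG;
    return 0;
}

int main(void)
{
    long six   = (6L << TAG_SIZE) | SMALL_TAG;
    long seven = (7L << TAG_SIZE) | SMALL_TAG;
    long prod;

    assert(tagged_mul(six, seven, &prod) == 0);
    assert(prod == ((42L << TAG_SIZE) | SMALL_TAG));
    return 0;
}
```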
-i_m_div(Fail, Live, Op1, Op2, Dst) {
+i_m_div(Fail, Op1, Op2, Dst) {
Eterm op1 = $Op1;
Eterm op2 = $Op2;
- $OUTLINED_ARITH_2($Fail, $Live, mixed_div, BIF_div_2, op1, op2, $Dst);
+ $OUTLINED_ARITH_2($Fail, mixed_div, BIF_div_2, op1, op2, $Dst);
}
-i_int_div(Fail, Live, Op1, Op2, Dst) {
+i_int_div(Fail, Op1, Op2, Dst) {
Eterm op1 = $Op1;
Eterm op2 = $Op2;
if (ERTS_UNLIKELY(op2 == SMALL_ZERO)) {
@@ -144,7 +222,7 @@ i_int_div(Fail, Live, Op1, Op2, Dst) {
$NEXT0();
}
}
- $OUTLINED_ARITH_2($Fail, $Live, int_div, BIF_intdiv_2, op1, op2, $Dst);
+ $OUTLINED_ARITH_2($Fail, int_div, BIF_intdiv_2, op1, op2, $Dst);
}
i_rem := rem.fetch.execute;
@@ -158,7 +236,7 @@ rem.fetch(Src1, Src2) {
RemOp2 = $Src2;
}
-rem.execute(Fail, Live, Dst) {
+rem.execute(Fail, Dst) {
if (ERTS_UNLIKELY(RemOp2 == SMALL_ZERO)) {
c_p->freason = BADARITH;
$BIF_ERROR_ARITY_2($Fail, BIF_rem_2, RemOp1, RemOp2);
@@ -166,7 +244,7 @@ rem.execute(Fail, Live, Dst) {
$Dst = make_small(signed_val(RemOp1) % signed_val(RemOp2));
$NEXT0();
} else {
- $OUTLINED_ARITH_2($Fail, $Live, int_rem, BIF_rem_2, RemOp1, RemOp2, $Dst);
+ $OUTLINED_ARITH_2($Fail, int_rem, BIF_rem_2, RemOp1, RemOp2, $Dst);
}
}
@@ -181,7 +259,7 @@ band.fetch(Src1, Src2) {
BandOp2 = $Src2;
}
-band.execute(Fail, Live, Dst) {
+band.execute(Fail, Dst) {
if (ERTS_LIKELY(is_both_small(BandOp1, BandOp2))) {
/*
* No need to untag -- TAG & TAG == TAG.
@@ -189,10 +267,10 @@ band.execute(Fail, Live, Dst) {
$Dst = BandOp1 & BandOp2;
$NEXT0();
}
- $OUTLINED_ARITH_2($Fail, $Live, band, BIF_band_2, BandOp1, BandOp2, $Dst);
+ $OUTLINED_ARITH_2($Fail, band, BIF_band_2, BandOp1, BandOp2, $Dst);
}
-i_bor(Fail, Live, Src1, Src2, Dst) {
+i_bor(Fail, Src1, Src2, Dst) {
if (ERTS_LIKELY(is_both_small($Src1, $Src2))) {
/*
* No need to untag -- TAG | TAG == TAG.
@@ -200,10 +278,10 @@ i_bor(Fail, Live, Src1, Src2, Dst) {
$Dst = $Src1 | $Src2;
$NEXT0();
}
- $OUTLINED_ARITH_2($Fail, $Live, bor, BIF_bor_2, $Src1, $Src2, $Dst);
+ $OUTLINED_ARITH_2($Fail, bor, BIF_bor_2, $Src1, $Src2, $Dst);
}
-i_bxor(Fail, Live, Src1, Src2, Dst) {
+i_bxor(Fail, Src1, Src2, Dst) {
if (ERTS_LIKELY(is_both_small($Src1, $Src2))) {
/*
* TAG ^ TAG == 0.
@@ -214,7 +292,7 @@ i_bxor(Fail, Live, Src1, Src2, Dst) {
$Dst = ($Src1 ^ $Src2) | make_small(0);
$NEXT0();
}
- $OUTLINED_ARITH_2($Fail, $Live, bxor, BIF_bxor_2, $Src1, $Src2, $Dst);
+ $OUTLINED_ARITH_2($Fail, bxor, BIF_bxor_2, $Src1, $Src2, $Dst);
}
i_bsl := shift.setup_bsl.execute;
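The tag identities relied on by band, i_bor, and i_bxor above are easy to verify directly: when both operands carry the same tag T in their low bits, AND and OR preserve T, while XOR clears it and the tag must be restored (the `| make_small(0)` in i_bxor is exactly the tag of the small zero). A small sketch, again with a hypothetical 4-bit tag:

```c
/* Why band/bor need no untagging while bxor restores the tag:
 * T & T == T,  T | T == T,  T ^ T == 0. */
#include <assert.h>

#define TAG_SIZE  4                      /* hypothetical tag layout */
#define SMALL_TAG 0xFL

static long make_small(long v) { return (v << TAG_SIZE) | SMALL_TAG; }

int main(void)
{
    long a = make_small(0xC), b = make_small(0xA);

    assert((a & b) == make_small(0xC & 0xA));               /* tag kept */
    assert((a | b) == make_small(0xC | 0xA));               /* tag kept */
    assert(((a ^ b) | SMALL_TAG) == make_small(0xC ^ 0xA)); /* restored */
    return 0;
}
```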
@@ -265,7 +343,7 @@ shift.setup_bsl(Src1, Src2) {
}
}
-shift.execute(Fail, Live, Dst) {
+shift.execute(Fail, Dst) {
Uint big_words_needed;
if (ERTS_LIKELY(is_small(Op1))) {
@@ -320,7 +398,9 @@ shift.execute(Fail, Live, Dst) {
}
{
Eterm tmp_big[2];
- Sint big_need_size = BIG_NEED_SIZE(big_words_needed+1);
+ Sint big_need_size = 1 + BIG_NEED_SIZE(big_words_needed+1);
+ Eterm* hp;
+ Eterm* hp_end;
/*
* Slightly conservative check the size to avoid
@@ -331,15 +411,16 @@ shift.execute(Fail, Live, Dst) {
if (big_need_size-8 > BIG_ARITY_MAX) {
$SYSTEM_LIMIT($Fail);
}
- $GC_TEST_PRESERVE(big_need_size+1, $Live, Op1);
+ hp = HeapFragOnlyAlloc(c_p, big_need_size);
if (is_small(Op1)) {
Op1 = small_to_big(signed_val(Op1), tmp_big);
}
- Op1 = big_lshift(Op1, shift_left_count, HTOP);
+ Op1 = big_lshift(Op1, shift_left_count, hp);
+ hp_end = hp + big_need_size;
if (is_big(Op1)) {
- HTOP += bignum_header_arity(*HTOP) + 1;
+ hp += bignum_header_arity(*hp) + 1;
}
- HEAP_SPACE_VERIFIED(0);
+ HRelease(c_p, hp_end, hp);
if (ERTS_UNLIKELY(is_nil(Op1))) {
/*
* This result must have been only slightly larger
@@ -349,7 +430,6 @@ shift.execute(Fail, Live, Dst) {
$SYSTEM_LIMIT($Fail);
}
ERTS_HOLE_CHECK(c_p);
- $REFRESH_GEN_DEST();
$Dst = Op1;
$NEXT0();
}
@@ -366,31 +446,28 @@ shift.execute(Fail, Live, Dst) {
reg[0] = Op1;
reg[1] = Op2;
SWAPOUT;
- if (IsOpCode(I[0], i_bsl_ssjtd)) {
+ if (IsOpCode(I[0], i_bsl_ssjd)) {
I = handle_error(c_p, I, reg, &bif_export[BIF_bsl_2]->info.mfa);
} else {
- ASSERT(IsOpCode(I[0], i_bsr_ssjtd));
+ ASSERT(IsOpCode(I[0], i_bsr_ssjd));
I = handle_error(c_p, I, reg, &bif_export[BIF_bsr_2]->info.mfa);
}
goto post_error_handling;
}
}
-i_int_bnot(Fail, Src, Live, Dst) {
+i_int_bnot(Fail, Src, Dst) {
Eterm bnot_val = $Src;
+ Eterm result;
+
if (ERTS_LIKELY(is_small(bnot_val))) {
- bnot_val = make_small(~signed_val(bnot_val));
+ result = make_small(~signed_val(bnot_val));
} else {
- Uint live = $Live;
- HEAVY_SWAPOUT;
- reg[live] = bnot_val;
- bnot_val = erts_gc_bnot(c_p, reg, live);
- HEAVY_SWAPIN;
+ result = erts_bnot(c_p, bnot_val);
ERTS_HOLE_CHECK(c_p);
- if (ERTS_UNLIKELY(is_nil(bnot_val))) {
- $BIF_ERROR_ARITY_1($Fail, BIF_bnot_1, reg[live]);
+ if (ERTS_UNLIKELY(is_nil(result))) {
+ $BIF_ERROR_ARITY_1($Fail, BIF_bnot_1, bnot_val);
}
- $REFRESH_GEN_DEST();
}
- $Dst = bnot_val;
+ $Dst = result;
}
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 291bc95604..412d689246 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -209,7 +209,6 @@ atom dirty_nif_finalizer
atom disable_trace
atom disabled
atom discard
-atom display_items
atom dist
atom dist_cmd
atom dist_ctrl_put_data
@@ -238,6 +237,7 @@ atom eof
atom eol
atom Eq='=:='
atom Eqeq='=='
+atom erl_init
atom erl_tracer
atom erlang
atom erl_signal_server
@@ -290,6 +290,7 @@ atom Ge='>='
atom generational
atom get_all_trap
atom get_seq_token
+atom get_size
atom get_tcw
atom gather_gc_info_result
atom gather_io_bytes
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 9633de2021..4d52435139 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -332,6 +332,13 @@ erts_debug_disassemble_1(BIF_ALIST_1)
"unknown " HEXF "\n", instr);
code_ptr++;
}
+ if (i == op_call_nif) {
+ /*
+ * The rest of the code will not be executed. Don't disassemble any
+ * more code in this function.
+ */
+ code_ptr = 0;
+ }
bin = new_binary(p, (byte *) dsbufp->str, dsbufp->str_len);
erts_destroy_tmp_dsbuf(dsbufp);
hsz = 4+4;
@@ -346,6 +353,22 @@ erts_debug_disassemble_1(BIF_ALIST_1)
return TUPLE3(hp, addr, bin, mfa);
}
+BIF_RETTYPE
+erts_debug_interpreter_size_0(BIF_ALIST_0)
+{
+ int i;
+ BeamInstr low, high;
+
+ low = high = (BeamInstr) process_main;
+ for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
+ BeamInstr a = BeamOpCodeAddr(i);
+ if (a > high) {
+ high = a;
+ }
+ }
+ return erts_make_integer(high - low, BIF_P);
+}
+
void
dbg_bt(Process* p, Eterm* sp)
{
@@ -558,23 +581,6 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
case 'I':
case 'W':
switch (op) {
- case op_i_gc_bif1_jWstd:
- case op_i_gc_bif2_jWtssd:
- case op_i_gc_bif3_jWtssd:
- {
- const ErtsGcBif* p;
- BifFunction gcf = (BifFunction) *ap;
- for (p = erts_gc_bifs; p->bif != 0; p++) {
- if (p->gc_bif == gcf) {
- print_bif_name(to, to_arg, p->bif);
- break;
- }
- }
- if (p->bif == 0) {
- erts_print(to, to_arg, "%d", (Uint)gcf);
- }
- break;
- }
case op_i_make_fun_Wt:
if (*sign == 'W') {
ErlFunEntry* fe = (ErlFunEntry *) *ap;
@@ -586,6 +592,7 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
}
break;
case op_i_bs_match_string_xfWW:
+ case op_i_bs_match_string_yfWW:
if (ap - first_arg < 3) {
erts_print(to, to_arg, "%d", *ap);
} else {
@@ -786,11 +793,14 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
}
}
break;
- case op_i_put_tuple_xI:
- case op_i_put_tuple_yI:
+ case op_put_tuple2_xI:
+ case op_put_tuple2_yI:
case op_new_map_dtI:
- case op_update_map_assoc_sdtI:
- case op_update_map_exact_jsdtI:
+ case op_update_map_assoc_xdtI:
+ case op_update_map_assoc_ydtI:
+ case op_update_map_assoc_cdtI:
+ case op_update_map_exact_xjdtI:
+ case op_update_map_exact_yjdtI:
{
int n = unpacked[-1];
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 3af0838794..07c16e3415 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -206,8 +206,12 @@ void** beam_ops;
#ifdef DEBUG
# /* The stack pointer is used in an assertion. */
# define LIGHT_SWAPOUT SWAPOUT
+# define DEBUG_SWAPOUT SWAPOUT
+# define DEBUG_SWAPIN SWAPIN
#else
# define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
+# define DEBUG_SWAPOUT
+# define DEBUG_SWAPIN
#endif
/*
@@ -318,19 +322,19 @@ void** beam_ops;
#define Arg(N) I[(N)+1]
-#define GetR(pos, tr) \
+#define GetSource(raw, dst) \
do { \
- tr = Arg(pos); \
- switch (loader_tag(tr)) { \
+ dst = raw; \
+ switch (loader_tag(dst)) { \
case LOADER_X_REG: \
- tr = x(loader_x_reg_index(tr)); \
+ dst = x(loader_x_reg_index(dst)); \
break; \
case LOADER_Y_REG: \
- ASSERT(loader_y_reg_index(tr) >= 1); \
- tr = y(loader_y_reg_index(tr)); \
+ ASSERT(loader_y_reg_index(dst) >= 1); \
+ dst = y(loader_y_reg_index(dst)); \
break; \
} \
- CHECK_TERM(tr); \
+ CHECK_TERM(dst); \
} while (0)
#define PUT_TERM_REG(term, desc) \
@@ -371,45 +375,33 @@ do { \
/*
* process_main() is already huge, so we want to avoid inlining
- * into it. Especially functions that are seldom used.
+ * seldom used functions into it.
*/
-#ifdef __GNUC__
-# define NOINLINE __attribute__((__noinline__))
-#else
-# define NOINLINE
-#endif
-
-int tuple_module_apply;
-
-/*
- * The following functions are called directly by process_main().
- * Don't inline them.
- */
-static void init_emulator_finish(void) NOINLINE;
-static ErtsCodeMFA *ubif2mfa(void* uf) NOINLINE;
-static ErtsCodeMFA *gcbif2mfa(void* gcf) NOINLINE;
+static void init_emulator_finish(void) ERTS_NOINLINE;
+static ErtsCodeMFA *ubif2mfa(void* uf) ERTS_NOINLINE;
static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
- Eterm* reg, ErtsCodeMFA* bif_mfa) NOINLINE;
+ Eterm* reg, ErtsCodeMFA* bif_mfa) ERTS_NOINLINE;
static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
- Eterm* reg, Eterm func) NOINLINE;
+ Eterm* reg, Eterm func) ERTS_NOINLINE;
static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
- BeamInstr *I, Uint offs) NOINLINE;
+ BeamInstr *I, Uint offs) ERTS_NOINLINE;
static BeamInstr* apply(Process* p, Eterm* reg,
- BeamInstr *I, Uint offs) NOINLINE;
+ BeamInstr *I, Uint offs) ERTS_NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
- Eterm* reg, Eterm args) NOINLINE;
+ Eterm* reg, Eterm args) ERTS_NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
- Eterm args, Eterm* reg) NOINLINE;
+ Eterm args, Eterm* reg) ERTS_NOINLINE;
static Eterm new_fun(Process* p, Eterm* reg,
- ErlFunEntry* fe, int num_free) NOINLINE;
+ ErlFunEntry* fe, int num_free) ERTS_NOINLINE;
+static int is_function2(Eterm Term, Uint arity);
static Eterm erts_gc_new_map(Process* p, Eterm* reg, Uint live,
- Uint n, BeamInstr* ptr) NOINLINE;
+ Uint n, BeamInstr* ptr) ERTS_NOINLINE;
static Eterm erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
- Uint live, BeamInstr* ptr) NOINLINE;
+ Uint live, BeamInstr* ptr) ERTS_NOINLINE;
static Eterm erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
- Uint n, BeamInstr* new_p) NOINLINE;
+ Uint n, BeamInstr* new_p) ERTS_NOINLINE;
static Eterm erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live,
- Uint n, Eterm* new_p) NOINLINE;
+ Uint n, Eterm* new_p) ERTS_NOINLINE;
static Eterm get_map_element(Eterm map, Eterm key);
static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);
@@ -422,6 +414,7 @@ static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
ErtsCodeMFA *bif_mfa, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
+static Eterm *get_freason_ptr_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
void
@@ -882,19 +875,22 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
#include "beam_warm.h"
OpCase(normal_exit): {
- SWAPOUT;
+ HEAVY_SWAPOUT;
c_p->freason = EXC_NORMAL;
c_p->arity = 0; /* In case this process will ever be garbed again. */
ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
erts_do_exit_process(c_p, am_normal);
ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ HEAVY_SWAPIN;
goto do_schedule;
}
OpCase(continue_exit): {
+ HEAVY_SWAPOUT;
ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
erts_continue_exit_process(c_p);
ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ HEAVY_SWAPIN;
goto do_schedule;
}
@@ -1302,18 +1298,6 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
}
static ErtsCodeMFA *
-gcbif2mfa(void* gcf)
-{
- int i;
- for (i = 0; erts_gc_bifs[i].bif; i++) {
- if (erts_gc_bifs[i].gc_bif == gcf)
- return &bif_export[erts_gc_bifs[i].exp_ix]->info.mfa;
- }
- erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
- return NULL;
-}
-
-static ErtsCodeMFA *
ubif2mfa(void* uf)
{
int i;
@@ -1321,7 +1305,7 @@ ubif2mfa(void* uf)
if (erts_u_bifs[i].bif == uf)
return &bif_export[erts_u_bifs[i].exp_ix]->info.mfa;
}
- erts_exit(ERTS_ERROR_EXIT, "bad u bif");
+ erts_exit(ERTS_ERROR_EXIT, "bad u bif: %p\n", uf);
return NULL;
}
@@ -1485,9 +1469,24 @@ next_catch(Process* c_p, Eterm *reg) {
BeamInstr i_return_time_trace = beam_return_time_trace[0];
ptr = prev = c_p->stop;
- ASSERT(is_CP(*ptr));
ASSERT(ptr <= STACK_START(c_p));
- if (ptr == STACK_START(c_p)) return NULL;
+
+ /* This function is only called if we have active catch tags or have
+ * previously called a function that was exception-traced. As the exception
+ * trace flag isn't cleared after the traced function returns (and the
+ * catch tag inserted by it is gone), it's possible to land here with an
+ * empty stack, and the process should simply die when that happens. */
+ if (ptr == STACK_START(c_p)) {
+ ASSERT(!active_catches && IS_TRACED_FL(c_p, F_EXCEPTION_TRACE));
+ return NULL;
+ }
+
+ /*
+ * Better safe than sorry here. In debug builds, produce a core
+ * dump if the top of the stack doesn't point to a continuation
+ * pointer. In other builds, ignore a non-CP at the top of stack.
+ */
+ ASSERT(is_CP(*ptr));
if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
*cp_val(*ptr) != i_return_to_trace &&
*cp_val(*ptr) != i_return_time_trace ))
@@ -1904,6 +1903,25 @@ static int is_raised_exc(Eterm exc) {
}
}
+static Eterm *get_freason_ptr_from_exc(Eterm exc) {
+ static Eterm dummy_freason;
+ struct StackTrace* s;
+
+ if (exc == NIL) {
+ /*
+ * Is is not exactly clear when exc can be NIL. Probably only
+ * when the exception has been generated from native code.
+ * Return a pointer to an Eterm that can be safely written and
+ * ignored.
+ */
+ return &dummy_freason;
+ } else {
+ ASSERT(is_list(exc));
+ s = (struct StackTrace *) big_val(CDR(list_val(exc)));
+ return &s->freason;
+ }
+}
+
/*
* Creating a list with the argument registers
*/
@@ -2211,7 +2229,6 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
Eterm module = reg[0];
Eterm function = reg[1];
Eterm args = reg[2];
- Eterm this;
/*
* Check the arguments which should be of the form apply(Module,
@@ -2234,20 +2251,8 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
while (1) {
Eterm m, f, a;
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- this = THE_NON_VALUE;
- if (is_not_atom(module)) {
- Eterm* tp;
-
- if (!tuple_module_apply || is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- this = module;
- module = tp[1];
- if (is_not_atom(module)) goto error;
- }
+
+ if (is_not_atom(module)) goto error;
if (module != am_erlang || function != am_apply)
break;
@@ -2282,9 +2287,7 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
}
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
- * the parameters to the x registers (reg[]). If the module argument
- * was an abstract module, add 1 to the function arity and put the
- * module argument in the n+1st x register as a THIS reference.
+ * the parameters to the x registers (reg[]).
*/
tmp = args;
@@ -2301,9 +2304,6 @@ apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
if (is_not_nil(tmp)) { /* Must be well-formed list */
goto error;
}
- if (this != THE_NON_VALUE) {
- reg[arity++] = this;
- }
/*
* Get the index into the export table, or failing that the export
@@ -2334,27 +2334,20 @@ fixed_apply(Process* p, Eterm* reg, Uint arity,
function = reg[arity+1];
if (is_not_atom(function)) {
+ Eterm bad_args;
error:
- p->freason = BADARG;
- reg[0] = module;
- reg[1] = function;
- reg[2] = NIL;
- return 0;
- }
+ bad_args = make_arglist(p, reg, arity);
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- if (is_not_atom(module)) {
- Eterm* tp;
- if (!tuple_module_apply || is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- module = tp[1];
- if (is_not_atom(module)) goto error;
- ++arity;
+ p->freason = BADARG;
+ reg[0] = module;
+ reg[1] = function;
+ reg[2] = bad_args;
+
+ return 0;
}
+ if (is_not_atom(module)) goto error;
+
/* Handle apply of apply/3... */
if (module == am_erlang && function == am_apply && arity == 3) {
return apply(p, reg, I, stack_offset);
@@ -2701,6 +2694,19 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
return make_fun(funp);
}
+static int
+is_function2(Eterm Term, Uint arity)
+{
+ if (is_fun(Term)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(Term);
+ return funp->arity == arity;
+ } else if (is_export(Term)) {
+ Export* exp = (Export *) (export_val(Term)[1]);
+ return exp->info.mfa.arity == arity;
+ }
+ return 0;
+}
+
static Eterm get_map_element(Eterm map, Eterm key)
{
Uint32 hx;
@@ -3092,12 +3098,14 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p
Uint need;
flatmap_t *old_mp, *mp;
Eterm res;
+ Eterm* old_hp;
Eterm* hp;
Eterm* E;
Eterm* old_keys;
Eterm* old_vals;
Eterm new_key;
Eterm map;
+ int changed = 0;
n /= 2; /* Number of values to be updated */
ASSERT(n > 0);
@@ -3164,6 +3172,7 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p
* Update map, keeping the old key tuple.
*/
+ old_hp = p->htop;
hp = p->htop;
E = p->stop;
@@ -3186,20 +3195,26 @@ erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p
/* Not same keys */
*hp++ = *old_vals;
} else {
- GET_TERM(new_p[1], *hp);
- hp++;
- n--;
+ GET_TERM(new_p[1], *hp);
+ if (*hp != *old_vals) changed = 1;
+ hp++;
+ n--;
if (n == 0) {
- /*
- * All updates done. Copy remaining values
- * and return the result.
- */
- for (i++, old_vals++; i < num_old; i++) {
- *hp++ = *old_vals++;
- }
- ASSERT(hp == p->htop + need);
- p->htop = hp;
- return res;
+ /*
+ * All updates done. If any value changed, copy the
+ * remaining values and return the new map; otherwise
+ * return the original map unchanged.
+ */
+ if (changed) {
+ for (i++, old_vals++; i < num_old; i++) {
+ *hp++ = *old_vals++;
+ }
+ ASSERT(hp == p->htop + need);
+ p->htop = hp;
+ return res;
+ } else {
+ p->htop = old_hp;
+ return map;
+ }
} else {
new_p += 2;
GET_TERM(*new_p, new_key);
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index e61199a8fd..941c3ebbbe 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -492,6 +492,11 @@ typedef struct LoaderState {
(Genop)->arity * sizeof(GenOpArg)); \
} while (0)
+#define GENOP_NAME_ARITY(Genop, Name, Arity) \
+ do { \
+ (Genop)->op = genop_##Name##_##Arity; \
+ (Genop)->arity = Arity; \
+ } while (0)
static void free_loader_state(Binary* magic);
static ErlHeapFragment* new_literal_fragment(Uint size);
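The GENOP_NAME_ARITY macro added above uses token pasting to resolve the generated genop_<Name>_<Arity> enumerator, so an instruction's opcode and its arity field can no longer drift apart the way they could when set separately. A minimal sketch of the pattern; the enumerator values here are hypothetical stand-ins for the generated beam_opcodes.h:

```c
/* Token-pasting pattern behind GENOP_NAME_ARITY. */
#include <assert.h>

enum { genop_move_2 = 1, genop_jump_1 = 2 }; /* hypothetical values */

typedef struct { int op; int arity; } GenOp;

#define GENOP_NAME_ARITY(Genop, Name, Arity)     \
    do {                                         \
        (Genop)->op = genop_##Name##_##Arity;    \
        (Genop)->arity = Arity;                  \
    } while (0)

int main(void)
{
    GenOp op;
    GENOP_NAME_ARITY(&op, move, 2); /* expands to genop_move_2 */
    assert(op.op == genop_move_2 && op.arity == 2);
    return 0;
}
```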
@@ -1425,7 +1430,7 @@ load_atom_table(LoaderState* stp, ErtsAtomEncoding enc)
ap = atom_tab(atom_val(stp->atom[1]));
sys_memcpy(sbuf, ap->name, ap->len);
sbuf[ap->len] = '\0';
- LoadError1(stp, "module name in object code is %s", sbuf);
+ LoadError1(stp, "BEAM file exists but it defines a module named %s", sbuf);
}
return 1;
@@ -2868,6 +2873,7 @@ load_code(LoaderState* stp)
break;
case op_bs_put_string_WW:
case op_i_bs_match_string_xfWW:
+ case op_i_bs_match_string_yfWW:
new_string_patch(stp, ci-1);
break;
@@ -2969,6 +2975,8 @@ load_code(LoaderState* stp)
#define succ(St, X, Y) ((X).type == (Y).type && (X).val + 1 == (Y).val)
#define succ2(St, X, Y) ((X).type == (Y).type && (X).val + 2 == (Y).val)
#define succ3(St, X, Y) ((X).type == (Y).type && (X).val + 3 == (Y).val)
+#define succ4(St, X, Y) ((X).type == (Y).type && (X).val + 4 == (Y).val)
+
#ifdef NO_FPE_SIGNALS
#define no_fpe_signals(St) 1
@@ -2985,6 +2993,35 @@ compiled_with_otp_20_or_higher(LoaderState* stp)
}
/*
+ * Predicate that tests whether the following two moves are independent:
+ *
+ * move Src1 Dst1
+ * move Src2 Dst2
+ *
+ */
+static int
+independent_moves(LoaderState* stp, GenOpArg Src1, GenOpArg Dst1,
+ GenOpArg Src2, GenOpArg Dst2)
+{
+ return (Src1.type != Dst2.type || Src1.val != Dst2.val) &&
+ (Src2.type != Dst1.type || Src2.val != Dst1.val) &&
+ (Dst1.type != Dst2.type || Dst1.val != Dst2.val);
+}
+
+/*
+ * Predicate that tests that two registers are distinct.
+ *
+ * move Src1 Dst1
+ * move Src2 Dst2
+ *
+ */
+static int
+distinct(LoaderState* stp, GenOpArg Reg1, GenOpArg Reg2)
+{
+ return Reg1.type != Reg2.type || Reg1.val != Reg2.val;
+}
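To make the independence condition above concrete: two adjacent moves may be reordered or fused only if neither source is the other move's destination and the two destinations differ. A toy rendering with a simplified (type, val) register pair standing in for GenOpArg:

```c
/* Toy version of independent_moves with a few test cases. */
#include <assert.h>

typedef struct { int type; int val; } Reg;

static int same(Reg a, Reg b) { return a.type == b.type && a.val == b.val; }

static int independent_moves(Reg s1, Reg d1, Reg s2, Reg d2)
{
    return !same(s1, d2) && !same(s2, d1) && !same(d1, d2);
}

int main(void)
{
    enum { X, Y };
    Reg x0 = {X,0}, x1 = {X,1}, y0 = {Y,0}, y1 = {Y,1};

    assert(independent_moves(x0, y0, x1, y1));  /* disjoint: ok    */
    assert(!independent_moves(x0, y0, y0, x1)); /* second reads y0 */
    assert(!independent_moves(x0, y0, x1, y0)); /* both write y0   */
    return 0;
}
```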
+
+/*
* Predicate that tests whether a jump table can be used.
*/
@@ -3108,6 +3145,42 @@ is_killed(LoaderState* stp, GenOpArg Reg, GenOpArg Live)
Live.val <= Reg.val;
}
+static int
+is_killed_by_call_fun(LoaderState* stp, GenOpArg Reg, GenOpArg Live)
+{
+ return Reg.type == TAG_x && Live.type == TAG_u &&
+ Live.val+1 <= Reg.val;
+}
+
+/*
+ * Test whether register Reg is killed by make_fun instruction that
+ * creates the fun given by index idx.
+ */
+
+static int
+is_killed_by_make_fun(LoaderState* stp, GenOpArg Reg, GenOpArg idx)
+{
+ Uint num_free;
+
+ if (idx.val >= stp->num_lambdas) {
+ /* Invalid index. Ignore the error for now. */
+ return 0;
+ } else {
+ num_free = stp->lambdas[idx.val].num_free;
+ return Reg.type == TAG_x && num_free <= Reg.val;
+ }
+}
+
+/*
+ * Test whether register Reg is killed by the send instruction that follows.
+ */
+
+static int
+is_killed_by_send(LoaderState* stp, GenOpArg Reg)
+{
+ return Reg.type == TAG_x && 2 <= Reg.val;
+}
+
/*
* Generate an instruction for element/2.
*/
@@ -3119,20 +3192,19 @@ gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
GenOp* op;
NEW_GENOP(stp, op);
- op->arity = 4;
op->next = NULL;
if (Index.type == TAG_i && Index.val > 0 &&
Index.val <= ERTS_MAX_TUPLE_SIZE &&
(Tuple.type == TAG_x || Tuple.type == TAG_y)) {
- op->op = genop_i_fast_element_4;
+ GENOP_NAME_ARITY(op, i_fast_element, 4);
op->a[0] = Tuple;
op->a[1] = Fail;
op->a[2].type = TAG_u;
op->a[2].val = Index.val;
op->a[3] = Dst;
} else {
- op->op = genop_i_element_4;
+ GENOP_NAME_ARITY(op, i_element, 4);
op->a[0] = Tuple;
op->a[1] = Fail;
op->a[2] = Index;
@@ -3148,8 +3220,7 @@ gen_bs_save(LoaderState* stp, GenOpArg Reg, GenOpArg Index)
GenOp* op;
NEW_GENOP(stp, op);
- op->op = genop_i_bs_save2_2;
- op->arity = 2;
+ GENOP_NAME_ARITY(op, i_bs_save2, 2);
op->a[0] = Reg;
op->a[1] = Index;
if (Index.type == TAG_u) {
@@ -3168,8 +3239,7 @@ gen_bs_restore(LoaderState* stp, GenOpArg Reg, GenOpArg Index)
GenOp* op;
NEW_GENOP(stp, op);
- op->op = genop_i_bs_restore2_2;
- op->arity = 2;
+ GENOP_NAME_ARITY(op, i_bs_restore2, 2);
op->a[0] = Reg;
op->a[1] = Index;
if (Index.type == TAG_u) {
@@ -3203,21 +3273,18 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
} else if ((Flags.val & BSF_SIGNED) != 0) {
goto generic;
} else if (bits == 8) {
- op->op = genop_i_bs_get_integer_8_3;
- op->arity = 3;
- op->a[0] = Ms;
+ GENOP_NAME_ARITY(op, i_bs_get_integer_8, 3);
+ op->a[0] = Ms;
op->a[1] = Fail;
op->a[2] = Dst;
} else if (bits == 16 && (Flags.val & BSF_LITTLE) == 0) {
- op->op = genop_i_bs_get_integer_16_3;
- op->arity = 3;
+ GENOP_NAME_ARITY(op, i_bs_get_integer_16, 3);
op->a[0] = Ms;
op->a[1] = Fail;
op->a[2] = Dst;
#ifdef ARCH_64
} else if (bits == 32 && (Flags.val & BSF_LITTLE) == 0) {
- op->op = genop_i_bs_get_integer_32_3;
- op->arity = 3;
+ GENOP_NAME_ARITY(op, i_bs_get_integer_32, 3);
op->a[0] = Ms;
op->a[1] = Fail;
op->a[2] = Dst;
@@ -3225,8 +3292,7 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
} else {
generic:
if (bits < SMALL_BITS) {
- op->op = genop_i_bs_get_integer_small_imm_5;
- op->arity = 5;
+ GENOP_NAME_ARITY(op, i_bs_get_integer_small_imm, 5);
op->a[0] = Ms;
op->a[1].type = TAG_u;
op->a[1].val = bits;
@@ -3234,8 +3300,7 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
op->a[3] = Flags;
op->a[4] = Dst;
} else {
- op->op = genop_i_bs_get_integer_imm_6;
- op->arity = 6;
+ GENOP_NAME_ARITY(op, i_bs_get_integer_imm, 6);
op->a[0] = Ms;
op->a[1].type = TAG_u;
op->a[1].val = bits;
@@ -3251,8 +3316,7 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
if (!term_to_Uint(big, &bigval)) {
error:
- op->op = genop_jump_1;
- op->arity = 1;
+ GENOP_NAME_ARITY(op, jump, 1);
op->a[0] = Fail;
} else {
if (!safe_mul(bigval, Unit.val, &bits)) {
@@ -3261,13 +3325,12 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
goto generic;
}
} else {
- op->op = genop_i_bs_get_integer_6;
- op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Live;
- op->a[2].type = TAG_u;
- op->a[2].val = (Unit.val << 3) | Flags.val;
- op->a[3] = Ms;
+ GENOP_NAME_ARITY(op, i_bs_get_integer, 6);
+ op->a[0] = Ms;
+ op->a[1] = Fail;
+ op->a[2] = Live;
+ op->a[3].type = TAG_u;
+ op->a[3].val = (Unit.val << 3) | Flags.val;
op->a[4] = Size;
op->a[5] = Dst;
op->next = NULL;
@@ -3291,26 +3354,16 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
NATIVE_ENDIAN(Flags);
if (Size.type == TAG_a && Size.val == am_all) {
- if (Ms.type == Dst.type && Ms.val == Dst.val) {
- op->op = genop_i_bs_get_binary_all_reuse_3;
- op->arity = 3;
- op->a[0] = Ms;
- op->a[1] = Fail;
- op->a[2] = Unit;
- } else {
- op->op = genop_i_bs_get_binary_all2_5;
- op->arity = 5;
- op->a[0] = Fail;
- op->a[1] = Ms;
- op->a[2] = Live;
- op->a[3] = Unit;
- op->a[4] = Dst;
- }
+ GENOP_NAME_ARITY(op, i_bs_get_binary_all2, 5);
+ op->a[0] = Ms;
+ op->a[1] = Fail;
+ op->a[2] = Live;
+ op->a[3] = Unit;
+ op->a[4] = Dst;
} else if (Size.type == TAG_i) {
- op->op = genop_i_bs_get_binary_imm2_6;
- op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ GENOP_NAME_ARITY(op, i_bs_get_binary_imm2, 6);
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3].type = TAG_u;
if (!safe_mul(Size.val, Unit.val, &op->a[3].val)) {
@@ -3324,14 +3377,12 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
if (!term_to_Uint(big, &bigval)) {
error:
- op->op = genop_jump_1;
- op->arity = 1;
+ GENOP_NAME_ARITY(op, jump, 1);
op->a[0] = Fail;
} else {
- op->op = genop_i_bs_get_binary_imm2_6;
- op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ GENOP_NAME_ARITY(op, i_bs_get_binary_imm2, 6);
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3].type = TAG_u;
if (!safe_mul(bigval, Unit.val, &op->a[3].val)) {
@@ -3341,10 +3392,9 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
op->a[5] = Dst;
}
} else {
- op->op = genop_i_bs_get_binary2_6;
- op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ GENOP_NAME_ARITY(op, i_bs_get_binary2, 6);
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3] = Size;
op->a[4].type = TAG_u;
@@ -3375,26 +3425,44 @@ gen_put_binary(LoaderState* stp, GenOpArg Fail,GenOpArg Size,
NATIVE_ENDIAN(Flags);
if (Size.type == TAG_a && Size.val == am_all) {
- op->op = genop_i_new_bs_put_binary_all_3;
- op->arity = 3;
- op->a[0] = Fail;
- op->a[1] = Src;
+ GENOP_NAME_ARITY(op, i_new_bs_put_binary_all, 3);
+ op->a[0] = Src;
+ op->a[1] = Fail;
op->a[2] = Unit;
} else if (Size.type == TAG_i) {
- op->op = genop_i_new_bs_put_binary_imm_3;
- op->arity = 3;
+ GENOP_NAME_ARITY(op, i_new_bs_put_binary_imm, 3);
op->a[0] = Fail;
op->a[1].type = TAG_u;
if (safe_mul(Size.val, Unit.val, &op->a[1].val)) {
op->a[2] = Src;
} else {
- op->op = genop_badarg_1;
- op->arity = 1;
+ error:
+ GENOP_NAME_ARITY(op, badarg, 1);
op->a[0] = Fail;
}
+ } else if (Size.type == TAG_q) {
+#ifdef ARCH_64
+ /*
+ * There is no way that this binary would fit in memory.
+ */
+ goto error;
+#else
+ Eterm big = stp->literals[Size.val].term;
+ Uint bigval;
+ Uint size;
+
+ if (!term_to_Uint(big, &bigval) ||
+ !safe_mul(bigval, Unit.val, &size)) {
+ goto error;
+ }
+ GENOP_NAME_ARITY(op, i_new_bs_put_binary_imm, 3);
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ op->a[1].val = size;
+ op->a[2] = Src;
+#endif
} else {
- op->op = genop_i_new_bs_put_binary_4;
- op->arity = 4;
+ GENOP_NAME_ARITY(op, i_new_bs_put_binary, 4);
op->a[0] = Fail;
op->a[1] = Size;
op->a[2].type = TAG_u;
@@ -3416,41 +3484,39 @@ gen_put_integer(LoaderState* stp, GenOpArg Fail, GenOpArg Size,
NATIVE_ENDIAN(Flags);
/* Negative size must fail */
if (Size.type == TAG_i) {
- op->op = genop_i_new_bs_put_integer_imm_4;
- op->arity = 4;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
- if (!safe_mul(Size.val, Unit.val, &op->a[1].val)) {
+ Uint size;
+ if (!safe_mul(Size.val, Unit.val, &size)) {
error:
- op->op = genop_badarg_1;
- op->arity = 1;
+ GENOP_NAME_ARITY(op, badarg, 1);
op->a[0] = Fail;
op->next = NULL;
return op;
}
- op->a[1].val = Size.val * Unit.val;
- op->a[2].type = Flags.type;
- op->a[2].val = (Flags.val & 7);
- op->a[3] = Src;
+ GENOP_NAME_ARITY(op, i_new_bs_put_integer_imm, 4);
+ op->a[0] = Src;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = size;
+ op->a[3].type = Flags.type;
+ op->a[3].val = (Flags.val & 7);
} else if (Size.type == TAG_q) {
Eterm big = stp->literals[Size.val].term;
Uint bigval;
+ Uint size;
- if (!term_to_Uint(big, &bigval)) {
+ if (!term_to_Uint(big, &bigval) ||
+ !safe_mul(bigval, Unit.val, &size)) {
goto error;
- } else {
- op->op = genop_i_new_bs_put_integer_imm_4;
- op->arity = 4;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
- op->a[1].val = bigval * Unit.val;
- op->a[2].type = Flags.type;
- op->a[2].val = (Flags.val & 7);
- op->a[3] = Src;
}
+ GENOP_NAME_ARITY(op, i_new_bs_put_integer_imm, 4);
+ op->a[0] = Src;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = size;
+ op->a[3].type = Flags.type;
+ op->a[3].val = (Flags.val & 7);
} else {
- op->op = genop_i_new_bs_put_integer_4;
- op->arity = 4;
+ GENOP_NAME_ARITY(op, i_new_bs_put_integer, 4);
op->a[0] = Fail;
op->a[1] = Size;
op->a[2].type = TAG_u;
@@ -3470,21 +3536,18 @@ gen_put_float(LoaderState* stp, GenOpArg Fail, GenOpArg Size,
NATIVE_ENDIAN(Flags);
if (Size.type == TAG_i) {
- op->op = genop_i_new_bs_put_float_imm_4;
- op->arity = 4;
+ GENOP_NAME_ARITY(op, i_new_bs_put_float_imm, 4);
op->a[0] = Fail;
op->a[1].type = TAG_u;
if (!safe_mul(Size.val, Unit.val, &op->a[1].val)) {
- op->op = genop_badarg_1;
- op->arity = 1;
+ GENOP_NAME_ARITY(op, badarg, 1);
op->a[0] = Fail;
} else {
op->a[2] = Flags;
op->a[3] = Src;
}
} else {
- op->op = genop_i_new_bs_put_float_4;
- op->arity = 4;
+ GENOP_NAME_ARITY(op, i_new_bs_put_float, 4);
op->a[0] = Fail;
op->a[1] = Size;
op->a[2].type = TAG_u;
@@ -3507,10 +3570,9 @@ gen_get_float2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
NEW_GENOP(stp, op);
NATIVE_ENDIAN(Flags);
- op->op = genop_i_bs_get_float2_6;
- op->arity = 6;
- op->a[0] = Fail;
- op->a[1] = Ms;
+ GENOP_NAME_ARITY(op, i_bs_get_float2, 6);
+ op->a[0] = Ms;
+ op->a[1] = Fail;
op->a[2] = Live;
op->a[3] = Size;
op->a[4].type = TAG_u;
@@ -3525,7 +3587,7 @@ gen_get_float2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
*/
static GenOp*
-gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
+gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
GenOpArg Size, GenOpArg Unit, GenOpArg Flags)
{
GenOp* op;
@@ -3533,16 +3595,26 @@ gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
NATIVE_ENDIAN(Flags);
NEW_GENOP(stp, op);
if (Size.type == TAG_a && Size.val == am_all) {
- op->op = genop_i_bs_skip_bits_all2_3;
- op->arity = 3;
+ /*
+ * This kind of skip instruction will only be found in modules
+ * compiled before OTP 19. From OTP 19 on, the compiler generates
+ * a test_unit instruction instead of a bs_skip at the end of a
+ * binary.
+ *
+ * It is safe to replace the skip instruction with a test_unit
+ * instruction, because the position will never be used again.
+ * If the match context itself is used again, it will be used by
+ * a bs_restore2 instruction which will overwrite the position
+ * by one of the stored positions.
+ */
+ GENOP_NAME_ARITY(op, bs_test_unit, 3);
op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[1] = Ms;
op->a[2] = Unit;
} else if (Size.type == TAG_i) {
- op->op = genop_i_bs_skip_bits_imm2_3;
- op->arity = 3;
+ GENOP_NAME_ARITY(op, i_bs_skip_bits_imm2, 3);
op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[1] = Ms;
op->a[2].type = TAG_u;
if (!safe_mul(Size.val, Unit.val, &op->a[2].val)) {
goto error;
@@ -3553,25 +3625,22 @@ gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
if (!term_to_Uint(big, &bigval)) {
error:
- op->op = genop_jump_1;
- op->arity = 1;
+ GENOP_NAME_ARITY(op, jump, 1);
op->a[0] = Fail;
} else {
- op->op = genop_i_bs_skip_bits_imm2_3;
- op->arity = 3;
+ GENOP_NAME_ARITY(op, i_bs_skip_bits_imm2, 3);
op->a[0] = Fail;
- op->a[1] = Ms;
+ op->a[1] = Ms;
op->a[2].type = TAG_u;
if (!safe_mul(bigval, Unit.val, &op->a[2].val)) {
goto error;
}
}
} else {
- op->op = genop_i_bs_skip_bits2_4;
- op->arity = 4;
- op->a[0] = Fail;
- op->a[1] = Ms;
- op->a[2] = Size;
+ GENOP_NAME_ARITY(op, i_bs_skip_bits2, 4);
+ op->a[0] = Ms;
+ op->a[1] = Size;
+ op->a[2] = Fail;
op->a[3] = Unit;
}
op->next = NULL;
@@ -3579,38 +3648,52 @@ gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
}
static GenOp*
-gen_increment(LoaderState* stp, GenOpArg Reg, GenOpArg Integer,
- GenOpArg Live, GenOpArg Dst)
+gen_increment(LoaderState* stp, GenOpArg Reg,
+ GenOpArg Integer, GenOpArg Dst)
{
GenOp* op;
NEW_GENOP(stp, op);
- op->op = genop_i_increment_4;
- op->arity = 4;
+ GENOP_NAME_ARITY(op, i_increment, 3);
op->next = NULL;
op->a[0] = Reg;
op->a[1].type = TAG_u;
op->a[1].val = Integer.val;
- op->a[2] = Live;
- op->a[3] = Dst;
+ op->a[2] = Dst;
return op;
}
static GenOp*
-gen_increment_from_minus(LoaderState* stp, GenOpArg Reg, GenOpArg Integer,
- GenOpArg Live, GenOpArg Dst)
+gen_increment_from_minus(LoaderState* stp, GenOpArg Reg,
+ GenOpArg Integer, GenOpArg Dst)
{
GenOp* op;
NEW_GENOP(stp, op);
- op->op = genop_i_increment_4;
- op->arity = 4;
+ GENOP_NAME_ARITY(op, i_increment, 3);
op->next = NULL;
op->a[0] = Reg;
op->a[1].type = TAG_u;
op->a[1].val = -Integer.val;
- op->a[2] = Live;
- op->a[3] = Dst;
+ op->a[2] = Dst;
+ return op;
+}
+
+static GenOp*
+gen_plus_from_minus(LoaderState* stp, GenOpArg Fail, GenOpArg Live,
+ GenOpArg Src, GenOpArg Integer, GenOpArg Dst)
+{
+ GenOp* op;
+
+ NEW_GENOP(stp, op);
+ GENOP_NAME_ARITY(op, gen_plus, 5);
+ op->next = NULL;
+ op->a[0] = Fail;
+ op->a[1] = Live;
+ op->a[2] = Src;
+ op->a[3].type = TAG_i;
+ op->a[3].val = -Integer.val;
+ op->a[4] = Dst;
return op;
}
@@ -3660,12 +3743,11 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
Sint timeout;
NEW_GENOP(stp, op);
- op->op = genop_wait_timeout_unlocked_int_2;
+ GENOP_NAME_ARITY(op, wait_timeout_unlocked_int, 2);
op->next = NULL;
- op->arity = 2;
op->a[0].type = TAG_u;
op->a[1] = Fail;
-
+
if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
#if defined(ARCH_64)
(timeout >> 32) == 0
@@ -3694,8 +3776,7 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
#if !defined(ARCH_64)
error:
#endif
- op->op = genop_i_wait_error_0;
- op->arity = 0;
+ GENOP_NAME_ARITY(op, i_wait_error, 0);
}
return op;
}
@@ -3707,9 +3788,8 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
Sint timeout;
NEW_GENOP(stp, op);
- op->op = genop_wait_timeout_locked_int_2;
+ GENOP_NAME_ARITY(op, wait_timeout_locked_int, 2);
op->next = NULL;
- op->arity = 2;
op->a[0].type = TAG_u;
op->a[1] = Fail;
@@ -3741,8 +3821,7 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
#if !defined(ARCH_64)
error:
#endif
- op->op = genop_i_wait_error_locked_0;
- op->arity = 0;
+ GENOP_NAME_ARITY(op, i_wait_error_locked, 0);
}
return op;
}
@@ -3779,9 +3858,9 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
*/
if (size == 2) {
NEW_GENOP(stp, op);
- op->next = NULL;
- op->op = genop_i_select_tuple_arity2_4;
+ GENOP_NAME_ARITY(op, i_select_tuple_arity2, 4);
GENOP_ARITY(op, arity - 1);
+ op->next = NULL;
op->a[0] = S;
op->a[1] = Fail;
op->a[2].type = TAG_u;
@@ -3807,9 +3886,9 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
size += align;
NEW_GENOP(stp, op);
- op->next = NULL;
- op->op = genop_i_select_tuple_arity_3;
+ GENOP_NAME_ARITY(op, i_select_tuple_arity, 3);
GENOP_ARITY(op, arity);
+ op->next = NULL;
op->a[0] = S;
op->a[1] = Fail;
op->a[2].type = TAG_u;
@@ -3865,21 +3944,18 @@ gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg TypeFail,
ASSERT(Size.val >= 2 && Size.val % 2 == 0);
NEW_GENOP(stp, is_integer);
- is_integer->op = genop_is_integer_2;
- is_integer->arity = 2;
+ GENOP_NAME_ARITY(is_integer, is_integer, 2);
is_integer->a[0] = TypeFail;
is_integer->a[1] = S;
NEW_GENOP(stp, label);
- label->op = genop_label_1;
- label->arity = 1;
+ GENOP_NAME_ARITY(label, label, 1);
label->a[0].type = TAG_u;
label->a[0].val = new_label(stp);
NEW_GENOP(stp, op1);
- op1->op = genop_select_val_3;
+ GENOP_NAME_ARITY(op1, select_val, 3);
GENOP_ARITY(op1, 3 + Size.val);
- op1->arity = 3;
op1->a[0] = S;
op1->a[1].type = TAG_f;
op1->a[1].val = label->a[0].val;
@@ -3887,9 +3963,8 @@ gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg TypeFail,
op1->a[2].val = 0;
NEW_GENOP(stp, op2);
- op2->op = genop_select_val_3;
+ GENOP_NAME_ARITY(op2, select_val, 3);
GENOP_ARITY(op2, 3 + Size.val);
- op2->arity = 3;
op2->a[0] = S;
op2->a[1] = Fail;
op2->a[2].type = TAG_u;
@@ -3967,19 +4042,17 @@ gen_jump_tab(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpAr
GenOp* jump;
NEW_GENOP(stp, op);
- op->arity = 3;
- op->op = genop_is_ne_exact_3;
+ GENOP_NAME_ARITY(op, is_ne_exact, 3);
op->a[0] = Rest[1];
op->a[1] = S;
op->a[2] = Rest[0];
NEW_GENOP(stp, jump);
- jump->next = NULL;
- jump->arity = 1;
- jump->op = genop_jump_1;
+ GENOP_NAME_ARITY(jump, jump, 1);
jump->a[0] = Fail;
op->next = jump;
+ jump->next = NULL;
return op;
}
@@ -4006,12 +4079,11 @@ gen_jump_tab(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpAr
NEW_GENOP(stp, op);
op->next = NULL;
if (min == 0) {
- op->op = genop_i_jump_on_val_zero_3;
- fixed_args = 3;
+ GENOP_NAME_ARITY(op, i_jump_on_val_zero, 3);
} else {
- op->op = genop_i_jump_on_val_4;
- fixed_args = 4;
+ GENOP_NAME_ARITY(op, i_jump_on_val, 4);
}
+ fixed_args = op->arity;
arity = fixed_args + size;
GENOP_ARITY(op, arity);
op->a[0] = S;
@@ -4076,7 +4148,7 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_i_select_val2_4;
+ GENOP_NAME_ARITY(op, i_select_val2, 4);
GENOP_ARITY(op, arity - 1);
op->a[0] = S;
op->a[1] = Fail;
@@ -4098,7 +4170,11 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = (align == 0) ? genop_i_select_val_bins_3 : genop_i_select_val_lins_3;
+ if (align == 0) {
+ GENOP_NAME_ARITY(op, i_select_val_bins, 3);
+ } else {
+ GENOP_NAME_ARITY(op, i_select_val_lins, 3);
+ }
GENOP_ARITY(op, arity);
op->a[0] = S;
op->a[1] = Fail;
@@ -4162,8 +4238,7 @@ gen_select_literals(LoaderState* stp, GenOpArg S, GenOpArg Fail,
ASSERT(Rest[i].type == TAG_q);
NEW_GENOP(stp, op);
- op->op = genop_is_ne_exact_3;
- op->arity = 3;
+ GENOP_NAME_ARITY(op, is_ne_exact, 3);
op->a[0] = Rest[i+1];
op->a[1] = S;
op->a[2] = Rest[i];
@@ -4172,9 +4247,8 @@ gen_select_literals(LoaderState* stp, GenOpArg S, GenOpArg Fail,
}
NEW_GENOP(stp, jump);
+ GENOP_NAME_ARITY(jump, jump, 1);
jump->next = NULL;
- jump->op = genop_jump_1;
- jump->arity = 1;
jump->a[0] = Fail;
*prev_next = jump;
return op;
@@ -4196,9 +4270,8 @@ const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
ASSERT(Size.type == TAG_u);
NEW_GENOP(stp, op);
+ GENOP_NAME_ARITY(op, jump, 1);
op->next = NULL;
- op->op = genop_jump_1;
- op->arity = 1;
/*
* Search for a literal matching the controlling expression.
@@ -4245,115 +4318,120 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx)
{
ErlFunEntry* fe;
GenOp* op;
+ Uint arity, num_free;
if (idx.val >= stp->num_lambdas) {
- stp->lambda_error = "missing or short chunk 'FunT'";
- fe = 0;
+ stp->lambda_error = "missing or short chunk 'FunT'";
+ fe = 0;
+ num_free = 0;
+ arity = 0;
} else {
- fe = stp->lambdas[idx.val].fe;
+ fe = stp->lambdas[idx.val].fe;
+ num_free = stp->lambdas[idx.val].num_free;
+ arity = fe->arity;
}
NEW_GENOP(stp, op);
- op->op = genop_i_make_fun_2;
- op->arity = 2;
- op->a[0].type = TAG_u;
- op->a[0].val = (BeamInstr) fe;
- op->a[1].type = TAG_u;
- op->a[1].val = stp->lambdas[idx.val].num_free;
- op->next = NULL;
- return op;
-}
-static GenOp*
-translate_gc_bif(LoaderState* stp, GenOp* op, GenOpArg Bif)
-{
- const ErtsGcBif* p;
- BifFunction bf;
-
- bf = stp->import[Bif.val].bf;
- for (p = erts_gc_bifs; p->bif != 0; p++) {
- if (p->bif == bf) {
- op->a[1].type = TAG_u;
- op->a[1].val = (BeamInstr) p->gc_bif;
- return op;
- }
+ /*
+ * It's possible that this is called before the init process has
+ * started; skip the optimisation in that case.
+ */
+ if (num_free == 0 && erts_init_process_id != ERTS_INVALID_PID) {
+ Uint lit;
+ Eterm* hp;
+ ErlFunThing* funp;
+
+ lit = new_literal(stp, &hp, ERL_FUN_SIZE);
+ funp = (ErlFunThing *) hp;
+ erts_refc_inc(&fe->refc, 2);
+ funp->thing_word = HEADER_FUN;
+ funp->next = NULL;
+ funp->fe = fe;
+ funp->num_free = 0;
+ funp->creator = erts_init_process_id;
+ funp->arity = arity;
+
+ /*
+ * Use a move_fun/2 instruction to load the fun to enable
+ * further optimizations.
+ */
+ GENOP_NAME_ARITY(op, move_fun, 2);
+ op->a[0].type = TAG_q;
+ op->a[0].val = lit;
+ op->a[1].type = TAG_x;
+ op->a[1].val = 0;
+ } else {
+ GENOP_NAME_ARITY(op, i_make_fun, 2);
+ op->a[0].type = TAG_u;
+ op->a[0].val = (BeamInstr) fe;
+ op->a[1].type = TAG_u;
+ op->a[1].val = num_free;
}
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
+ op->next = NULL;
return op;
}
-/*
- * Rewrite gc_bifs with one parameter (the common case).
- */
static GenOp*
-gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
- GenOpArg Src, GenOpArg Dst)
+gen_is_function2(LoaderState* stp, GenOpArg Fail, GenOpArg Fun, GenOpArg Arity)
{
GenOp* op;
+ int literal_arity = Arity.type == TAG_i;
+ int fun_is_reg = Fun.type == TAG_x || Fun.type == TAG_y;
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_i_gc_bif1_5;
- op->arity = 5;
- op->a[0] = Fail;
- /* op->a[1] is set by translate_gc_bif() */
- op->a[2] = Src;
- op->a[3] = Live;
- op->a[4] = Dst;
- return translate_gc_bif(stp, op, Bif);
-}
-/*
- * This is used by the ops.tab rule that rewrites gc_bifs with two parameters.
- */
-static GenOp*
-gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
- GenOpArg S1, GenOpArg S2, GenOpArg Dst)
-{
- GenOp* op;
+ if (fun_is_reg && literal_arity) {
+ /*
+ * Most common case. Fun in a register and arity
+ * is an integer literal.
+ */
+ if (Arity.val > MAX_ARG) {
+ /* Arity is negative or too big. */
+ GENOP_NAME_ARITY(op, jump, 1);
+ op->a[0] = Fail;
+ return op;
+ } else {
+ GENOP_NAME_ARITY(op, hot_is_function2, 3);
+ op->a[0] = Fail;
+ op->a[1] = Fun;
+ op->a[2].type = TAG_u;
+ op->a[2].val = Arity.val;
+ return op;
+ }
+ } else {
+ /*
+ * Handle extremely uncommon cases by a slower sequence.
+ */
+ GenOp* move_fun;
+ GenOp* move_arity;
- NEW_GENOP(stp, op);
- op->next = NULL;
- op->op = genop_i_gc_bif2_6;
- op->arity = 6;
- op->a[0] = Fail;
- /* op->a[1] is set by translate_gc_bif() */
- op->a[2] = Live;
- op->a[3] = S1;
- op->a[4] = S2;
- op->a[5] = Dst;
- return translate_gc_bif(stp, op, Bif);
-}
+ NEW_GENOP(stp, move_fun);
+ NEW_GENOP(stp, move_arity);
-/*
- * This is used by the ops.tab rule that rewrites gc_bifs with three parameters.
- */
-static GenOp*
-gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
- GenOpArg S1, GenOpArg S2, GenOpArg S3, GenOpArg Dst)
-{
- GenOp* op;
+ move_fun->next = move_arity;
+ move_arity->next = op;
- NEW_GENOP(stp, op);
- op->next = NULL;
- op->op = genop_ii_gc_bif3_7;
- op->arity = 7;
- op->a[0] = Fail;
- /* op->a[1] is set by translate_gc_bif() */
- op->a[2] = Live;
- op->a[3] = S1;
- op->a[4] = S2;
- op->a[5] = S3;
- op->a[6] = Dst;
- return translate_gc_bif(stp, op, Bif);
+ GENOP_NAME_ARITY(move_fun, move, 2);
+ move_fun->a[0] = Fun;
+ move_fun->a[1].type = TAG_x;
+ move_fun->a[1].val = 1022;
+
+ GENOP_NAME_ARITY(move_arity, move, 2);
+ move_arity->a[0] = Arity;
+ move_arity->a[1].type = TAG_x;
+ move_arity->a[1].val = 1023;
+
+ GENOP_NAME_ARITY(op, cold_is_function2, 3);
+ op->a[0] = Fail;
+ op->a[1].type = TAG_x;
+ op->a[1].val = 1022;
+ op->a[2].type = TAG_x;
+ op->a[2].val = 1023;
+ return move_fun;
+ }
}
static GenOp*
@@ -4367,8 +4445,8 @@ tuple_append_put5(LoaderState* stp, GenOpArg Arity, GenOpArg Dst,
NEW_GENOP(stp, op);
op->next = NULL;
+ GENOP_NAME_ARITY(op, i_put_tuple, 2);
GENOP_ARITY(op, arity+2+5);
- op->op = genop_i_put_tuple_2;
op->a[0] = Dst;
op->a[1].type = TAG_u;
op->a[1].val = arity + 5;
@@ -4393,8 +4471,8 @@ tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst,
NEW_GENOP(stp, op);
op->next = NULL;
+ GENOP_NAME_ARITY(op, i_put_tuple, 2);
GENOP_ARITY(op, arity+2+1);
- op->op = genop_i_put_tuple_2;
op->a[0] = Dst;
op->a[1].type = TAG_u;
op->a[1].val = arity + 1;
@@ -4462,9 +4540,9 @@ gen_new_small_map_lit(LoaderState* stp, GenOpArg Dst, GenOpArg Live,
Eterm keys;
NEW_GENOP(stp, op);
+ GENOP_NAME_ARITY(op, i_new_small_map_lit, 3);
GENOP_ARITY(op, 3 + size/2);
op->next = NULL;
- op->op = genop_i_new_small_map_lit_3;
tmp = thp = erts_alloc(ERTS_ALC_T_LOADER_TMP, (1 + size/2) * sizeof(*tmp));
keys = make_tuple(thp);
@@ -4522,19 +4600,6 @@ is_empty_map(LoaderState* stp, GenOpArg Lit)
}
/*
- * Predicate to test whether the given literal is an export.
- */
-static int
-literal_is_export(LoaderState* stp, GenOpArg Lit)
-{
- Eterm term;
-
- ASSERT(Lit.type == TAG_q);
- term = stp->literals[Lit.val].term;
- return is_export(term);
-}
-
-/*
* Pseudo predicate map_key_sort that will sort the Rest operand for
* map instructions as a side effect.
*/
@@ -4658,14 +4723,12 @@ gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
Key = Rest[0];
if (hash_genop_arg(stp, Key, &hx)) {
- op->arity = 5;
- op->op = genop_i_get_map_element_hash_5;
+ GENOP_NAME_ARITY(op, i_get_map_element_hash, 5);
op->a[3].type = TAG_u;
op->a[3].val = (BeamInstr) hx;
op->a[4] = Rest[1];
} else {
- op->arity = 4;
- op->op = genop_i_get_map_element_4;
+ GENOP_NAME_ARITY(op, i_get_map_element, 4);
op->a[3] = Rest[1];
}
return op;
@@ -4705,15 +4768,13 @@ gen_get(LoaderState* stp, GenOpArg Src, GenOpArg Dst)
NEW_GENOP(stp, op);
op->next = NULL;
if (hash_internal_genop_arg(stp, Src, &hx)) {
- op->arity = 3;
- op->op = genop_i_get_hash_3;
+ GENOP_NAME_ARITY(op, i_get_hash, 3);
op->a[0] = Src;
op->a[1].type = TAG_u;
op->a[1].val = (BeamInstr) hx;
op->a[2] = Dst;
} else {
- op->arity = 2;
- op->op = genop_i_get_2;
+ GENOP_NAME_ARITY(op, i_get, 2);
op->a[0] = Src;
op->a[1] = Dst;
}
@@ -4737,7 +4798,7 @@ gen_get_map_elements(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
ASSERT(Size.type == TAG_u);
NEW_GENOP(stp, op);
- op->op = genop_i_get_map_elements_3;
+ GENOP_NAME_ARITY(op, i_get_map_elements, 3);
GENOP_ARITY(op, 3 + 3*(Size.val/2));
op->next = NULL;
op->a[0] = Fail;
@@ -4775,9 +4836,9 @@ gen_has_map_fields(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
n = Size.val;
NEW_GENOP(stp, op);
+ GENOP_NAME_ARITY(op, get_map_elements, 3);
GENOP_ARITY(op, 3 + 2*n);
op->next = NULL;
- op->op = genop_get_map_elements_3;
op->a[0] = Fail;
op->a[1] = Src;
@@ -6114,7 +6175,8 @@ erts_release_literal_area(ErtsLiteralArea* literal_area)
}
default:
ASSERT(is_external_header(oh->thing_word));
- erts_deref_node_entry(((ExternalThing*)oh)->node);
+ erts_deref_node_entry(((ExternalThing*)oh)->node,
+ make_boxed(&oh->thing_word));
}
oh = oh->next;
}
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 000397e790..b81056c774 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -184,7 +184,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
DistEntry *dep;
ErtsLink *lnk;
int code;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
dep = external_pid_dist_entry(BIF_ARG_1);
if (dep == erts_this_dist_entry)
@@ -201,9 +201,9 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
ldp = erts_link_to_data(lnk);
- code = erts_dsig_prepare(&dsd, dep, BIF_P,
+ code = erts_dsig_prepare(&ctx, dep, BIF_P,
ERTS_PROC_LOCK_MAIN,
- ERTS_DSP_RLOCK, 0, 1);
+ ERTS_DSP_RLOCK, 0, 1, 1);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
@@ -218,16 +218,14 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
* We have a (pending) connection.
* Set up the link and enqueue the link signal.
*/
-#ifdef DEBUG
- int inserted =
-#endif
- erts_link_dist_insert(&ldp->b, dep->mld);
- ASSERT(inserted);
+ int inserted = erts_link_dist_insert(&ldp->b, dep->mld);
+ ASSERT(inserted); (void)inserted;
erts_de_runlock(dep);
- code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1);
+ code = erts_dsig_send_link(&ctx, BIF_P->common.id, BIF_ARG_1);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
BIF_RET(am_true);
break;
}
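The `ASSERT(inserted); (void)inserted;` pattern above (also used in monitor_2 further down) replaces the old `#ifdef DEBUG` scaffolding: the variable is now always assigned, and the `(void)` cast keeps release builds, where ASSERT expands to nothing, from warning about an unused variable. A minimal sketch of the idiom, with a hypothetical insert_entry() standing in for erts_link_dist_insert():

    #include <assert.h>

    static int insert_entry(int key) {
        return key >= 0;    /* stand-in: nonzero means "inserted" */
    }

    void link_example(void) {
        int inserted = insert_entry(17);
        assert(inserted);   /* compiled out when NDEBUG is defined */
        (void)inserted;     /* keeps -Wunused-variable quiet in that case */
    }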
@@ -309,7 +307,7 @@ demonitor(Process *c_p, Eterm ref, Eterm *multip)
DistEntry *dep;
int code = ERTS_DSIG_SEND_OK;
int deleted;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
ASSERT(is_external_pid(to) || is_node_name_atom(to));
@@ -325,8 +323,8 @@ demonitor(Process *c_p, Eterm ref, Eterm *multip)
}
}
- code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_PROC_LOCK_MAIN,
- ERTS_DSP_RLOCK, 0, 0);
+ code = erts_dsig_prepare(&ctx, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_RLOCK, 0, 1, 0);
deleted = erts_monitor_dist_delete(&mdp->target);
@@ -355,8 +353,8 @@ demonitor(Process *c_p, Eterm ref, Eterm *multip)
* monitor list since in case of monitor name
* the atom is stored there. Yield if necessary.
*/
- code = erts_dsig_send_demonitor(&dsd, c_p->common.id,
- watched, mdp->ref, 0);
+ code = erts_dsig_send_demonitor(&ctx, c_p->common.id,
+ watched, mdp->ref);
break;
}
@@ -536,7 +534,7 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
}
if (is_external_pid(target)) {
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
dep = external_pid_dist_entry(target);
@@ -554,9 +552,9 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
BIF_P->common.id, id, name);
erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), &mdp->origin);
- code = erts_dsig_prepare(&dsd, dep,
+ code = erts_dsig_prepare(&ctx, dep,
BIF_P, ERTS_PROC_LOCK_MAIN,
- ERTS_DSP_RLOCK, 0, 1);
+ ERTS_DSP_RLOCK, 0, 1, 1);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
@@ -567,15 +565,11 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED: {
-#ifdef DEBUG
- int inserted =
-#endif
-
- erts_monitor_dist_insert(&mdp->target, dep->mld);
- ASSERT(inserted);
+ int inserted = erts_monitor_dist_insert(&mdp->target, dep->mld);
+ ASSERT(inserted); (void)inserted;
erts_de_runlock(dep);
- code = erts_dsig_send_monitor(&dsd, BIF_P->common.id, target, ref);
+ code = erts_dsig_send_monitor(&ctx, BIF_P->common.id, target, ref);
break;
}
@@ -921,7 +915,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
ErtsLinkData *ldp;
DistEntry *dep;
int code;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
dep = external_pid_dist_entry(BIF_ARG_1);
if (dep == erts_this_dist_entry)
@@ -939,15 +933,15 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
else
erts_link_release(lnk);
- code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_PROC_LOCK_MAIN,
- ERTS_DSP_NO_LOCK, 0, 0);
+ code = erts_dsig_prepare(&ctx, dep, BIF_P, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 1, 0);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
BIF_RET(am_true);
case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED:
- code = erts_dsig_send_unlink(&dsd, BIF_P->common.id, BIF_ARG_1);
+ code = erts_dsig_send_unlink(&ctx, BIF_P->common.id, BIF_ARG_1);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
break;
@@ -1308,10 +1302,11 @@ static BIF_RETTYPE send_exit_signal_bif(Process *c_p, Eterm id, Eterm reason, in
ERTS_BIF_PREP_RET(ret_val, am_true); /* Old incarnation of this node... */
else {
int code;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
+
+ code = erts_dsig_prepare(&ctx, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 0, 1);
- code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_PROC_LOCK_MAIN,
- ERTS_DSP_NO_LOCK, 0, 1);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
@@ -1319,11 +1314,29 @@ static BIF_RETTYPE send_exit_signal_bif(Process *c_p, Eterm id, Eterm reason, in
break;
case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED:
- code = erts_dsig_send_exit2(&dsd, c_p->common.id, id, reason);
- if (code == ERTS_DSIG_SEND_YIELD)
+ code = erts_dsig_send_exit2(&ctx, c_p->common.id, id, reason);
+ switch (code) {
+ case ERTS_DSIG_SEND_YIELD:
ERTS_BIF_PREP_YIELD_RETURN(ret_val, c_p, am_true);
- else
+ break;
+ case ERTS_DSIG_SEND_CONTINUE:
+ BUMP_ALL_REDS(c_p);
+ erts_set_gc_state(c_p, 0);
+ ERTS_BIF_PREP_TRAP1(ret_val, &dsend_continue_trap_export, c_p,
+ erts_dsend_export_trap_context(c_p, &ctx));
+ break;
+ case ERTS_DSIG_SEND_OK:
ERTS_BIF_PREP_RET(ret_val, am_true);
+ break;
+ case ERTS_DSIG_SEND_TOO_LRG:
+ erts_set_gc_state(c_p, 1);
+ ERTS_BIF_PREP_ERROR(ret_val, c_p, SYSTEM_LIMIT);
+ break;
+ default:
+ ASSERT(! "Invalid dsig send exit2 result");
+ ERTS_BIF_PREP_ERROR(ret_val, c_p, EXC_INTERNAL_ERROR);
+ break;
+ }
break;
default:
ASSERT(! "Invalid dsig prepare result");
@@ -1803,36 +1816,40 @@ ebif_bang_2(BIF_ALIST_2)
#define SEND_INTERNAL_ERROR (-6)
#define SEND_AWAIT_RESULT (-7)
#define SEND_YIELD_CONTINUE (-8)
+#define SEND_SYSTEM_LIMIT (-9)
static Sint remote_send(Process *p, DistEntry *dep,
- Eterm to, Eterm full_to, Eterm msg,
- ErtsSendContext* ctx)
+ Eterm to, Eterm node, Eterm full_to, Eterm msg,
+ Eterm return_term, Eterm *ctxpp,
+ int connect, int suspend)
{
Sint res;
int code;
+ ErtsDSigSendContext ctx;
ASSERT(is_atom(to) || is_external_pid(to));
- ctx->dep = dep;
- code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_PROC_LOCK_MAIN,
+ code = erts_dsig_prepare(&ctx, dep, p, ERTS_PROC_LOCK_MAIN,
ERTS_DSP_NO_LOCK,
- !ctx->suspend, ctx->connect);
+ !suspend, 0, connect);
+ ctx.return_term = return_term;
+ ctx.node = node;
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
res = SEND_NOCONNECT;
break;
case ERTS_DSIG_PREP_WOULD_SUSPEND:
- ASSERT(!ctx->suspend);
+ ASSERT(!suspend);
res = SEND_YIELD;
break;
case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED: {
if (is_atom(to))
- code = erts_dsig_send_reg_msg(to, msg, ctx);
+ code = erts_dsig_send_reg_msg(&ctx, to, msg);
else
- code = erts_dsig_send_msg(to, msg, ctx);
+ code = erts_dsig_send_msg(&ctx, to, msg);
/*
* Note that reductions have been bumped on calling
* process by erts_dsig_send_reg_msg() or
@@ -1840,8 +1857,20 @@ static Sint remote_send(Process *p, DistEntry *dep,
*/
if (code == ERTS_DSIG_SEND_YIELD)
res = SEND_YIELD_RETURN;
- else if (code == ERTS_DSIG_SEND_CONTINUE)
+ else if (code == ERTS_DSIG_SEND_CONTINUE) {
+ erts_set_gc_state(p, 0);
+
+ /* Keep a reference to the dist entry if the
+ recipient is a name and not a pid. */
+ if (is_atom(to)) {
+ erts_ref_dist_entry(ctx.dep);
+ ctx.deref_dep = 1;
+ }
+
+ *ctxpp = erts_dsend_export_trap_context(p, &ctx);
res = SEND_YIELD_CONTINUE;
+ } else if (code == ERTS_DSIG_SEND_TOO_LRG)
+ res = SEND_SYSTEM_LIMIT;
else
res = 0;
break;
@@ -1862,7 +1891,8 @@ static Sint remote_send(Process *p, DistEntry *dep,
}
static Sint
-do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
+do_send(Process *p, Eterm to, Eterm msg, Eterm return_term, Eterm *refp,
+ Eterm *dist_ctx, int connect, int suspend)
{
Eterm portid;
Port *pt;
@@ -1894,7 +1924,8 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
erts_send_error_to_logger(p->group_leader, dsbufp);
return 0;
}
- return remote_send(p, dep, to, to, msg, ctx);
+ return remote_send(p, dep, to, dep->sysname, to, msg, return_term,
+ dist_ctx, connect, suspend);
} else if (is_atom(to)) {
Eterm id = erts_whereis_name_to_id(p, to);
@@ -1949,7 +1980,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
ret_val = 0;
if (pt) {
- int ps_flags = ctx->suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
+ int ps_flags = suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
*refp = NIL;
if (IS_TRACED_FL(p, F_TRACE_SEND)) /* trace once only !! */
@@ -1964,12 +1995,12 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
switch (erts_port_command(p, ps_flags, pt, msg, refp)) {
case ERTS_PORT_OP_BUSY:
/* Nothing has been sent */
- if (ctx->suspend)
+ if (suspend)
erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
return SEND_YIELD;
case ERTS_PORT_OP_BUSY_SCHEDULED:
/* Message was sent */
- if (ctx->suspend) {
+ if (suspend) {
erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
ret_val = SEND_YIELD_RETURN;
break;
@@ -2040,13 +2071,10 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
ASSERT(dep != erts_this_dist_entry);
deref_dep = 1;
}
- ctx->dsd.node = tp[2];
- ret = remote_send(p, dep, tp[1], to, msg, ctx);
- if (ret == SEND_YIELD_CONTINUE) {
- erts_ref_dist_entry(ctx->dep);
- ctx->deref_dep = 1;
- }
+ ret = remote_send(p, dep, tp[1], tp[2], to, msg, return_term,
+ dist_ctx, connect, suspend);
+
if (deref_dep)
erts_deref_dist_entry(dep);
return ret;
@@ -2085,25 +2113,16 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
Eterm l = opts;
Sint result;
-
- DeclareTypedTmpHeap(ErtsSendContext, ctx, BIF_P);
+ int connect = 1, suspend = 1;
+ Eterm ctx;
ERTS_MSACC_PUSH_STATE_M_X();
- UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
-
- ctx->suspend = !0;
- ctx->connect = !0;
- ctx->deref_dep = 0;
- ctx->return_term = am_ok;
- ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
- ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
-
while (is_list(l)) {
if (CAR(list_val(l)) == am_noconnect) {
- ctx->connect = 0;
+ connect = 0;
} else if (CAR(list_val(l)) == am_nosuspend) {
- ctx->suspend = 0;
+ suspend = 0;
} else {
ERTS_BIF_PREP_ERROR(retval, p, BADARG);
goto done;
@@ -2120,7 +2139,7 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
#endif
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_SEND);
- result = do_send(p, to, msg, &ref, ctx);
+ result = do_send(p, to, msg, am_ok, &ref, &ctx, connect, suspend);
ERTS_MSACC_POP_STATE_M_X();
if (result >= 0) {
@@ -2133,22 +2152,21 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
switch (result) {
case SEND_NOCONNECT:
- if (ctx->connect) {
+ if (connect) {
ERTS_BIF_PREP_RET(retval, am_ok);
} else {
ERTS_BIF_PREP_RET(retval, am_noconnect);
}
break;
case SEND_YIELD:
- if (ctx->suspend) {
- ERTS_BIF_PREP_YIELD3(retval,
- bif_export[BIF_send_3], p, to, msg, opts);
+ if (suspend) {
+ ERTS_BIF_PREP_YIELD3(retval, bif_export[BIF_send_3], p, to, msg, opts);
} else {
ERTS_BIF_PREP_RET(retval, am_nosuspend);
}
break;
case SEND_YIELD_RETURN:
- if (!ctx->suspend) {
+ if (!suspend) {
ERTS_BIF_PREP_RET(retval, am_nosuspend);
break;
}
@@ -2162,6 +2180,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
case SEND_BADARG:
ERTS_BIF_PREP_ERROR(retval, p, BADARG);
break;
+ case SEND_SYSTEM_LIMIT:
+ ERTS_BIF_PREP_ERROR(retval, p, SYSTEM_LIMIT);
+ break;
case SEND_USER_ERROR:
ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);
break;
@@ -2170,9 +2191,7 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
break;
case SEND_YIELD_CONTINUE:
BUMP_ALL_REDS(p);
- erts_set_gc_state(p, 0);
- ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p,
- erts_dsend_export_trap_context(p, ctx));
+ ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p, ctx);
break;
default:
erts_exit(ERTS_ABORT_EXIT, "send_3 invalid result %d\n", (int)result);
@@ -2180,7 +2199,6 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
}
done:
- UnUseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
return retval;
}
@@ -2194,14 +2212,14 @@ BIF_RETTYPE send_2(BIF_ALIST_2)
static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
{
Binary* bin = erts_magic_ref2bin(BIF_ARG_1);
- ErtsSendContext* ctx = (ErtsSendContext*) ERTS_MAGIC_BIN_DATA(bin);
+ ErtsDSigSendContext *ctx = (ErtsDSigSendContext*) ERTS_MAGIC_BIN_DATA(bin);
Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(BIF_P) * TERM_TO_BINARY_LOOP_FACTOR);
int result;
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dsend_context_dtor);
- ctx->dss.reds = initial_reds;
- result = erts_dsig_send(&ctx->dsd, &ctx->dss);
+ ctx->reds = initial_reds;
+ result = erts_dsig_send(ctx);
switch (result) {
case ERTS_DSIG_SEND_OK:
@@ -2210,7 +2228,7 @@ static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
break;
case ERTS_DSIG_SEND_YIELD: /*SEND_YIELD_RETURN*/
erts_set_gc_state(BIF_P, 1);
- if (!ctx->suspend)
+ if (ctx->no_suspend)
BIF_RET(am_nosuspend);
ERTS_BIF_YIELD_RETURN(BIF_P, ctx->return_term);
@@ -2218,6 +2236,10 @@ static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
BUMP_ALL_REDS(BIF_P);
BIF_TRAP1(&dsend_continue_trap_export, BIF_P, BIF_ARG_1);
}
+ case ERTS_DSIG_SEND_TOO_LRG: { /*SEND_SYSTEM_LIMIT*/
+ erts_set_gc_state(BIF_P, 1);
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
default:
erts_exit(ERTS_ABORT_EXIT, "dsend_continue_trap invalid result %d\n", (int)result);
break;
@@ -2231,20 +2253,14 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
Eterm retval;
Eterm ref;
Sint result;
- DeclareTypedTmpHeap(ErtsSendContext, ctx, p);
+ Eterm ctx;
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_SEND);
- UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), p);
+
#ifdef DEBUG
ref = NIL;
#endif
- ctx->suspend = !0;
- ctx->connect = !0;
- ctx->deref_dep = 0;
- ctx->return_term = msg;
- ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
- ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
- result = do_send(p, to, msg, &ref, ctx);
+ result = do_send(p, to, msg, msg, &ref, &ctx, 1, 1);
ERTS_MSACC_POP_STATE_M_X();
@@ -2275,6 +2291,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
case SEND_BADARG:
ERTS_BIF_PREP_ERROR(retval, p, BADARG);
break;
+ case SEND_SYSTEM_LIMIT:
+ ERTS_BIF_PREP_ERROR(retval, p, SYSTEM_LIMIT);
+ break;
case SEND_USER_ERROR:
ERTS_BIF_PREP_ERROR(retval, p, EXC_ERROR);
break;
@@ -2283,9 +2302,7 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
break;
case SEND_YIELD_CONTINUE:
BUMP_ALL_REDS(p);
- erts_set_gc_state(p, 0);
- ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p,
- erts_dsend_export_trap_context(p, ctx));
+ ERTS_BIF_PREP_TRAP1(retval, &dsend_continue_trap_export, p, ctx);
break;
default:
erts_exit(ERTS_ABORT_EXIT, "invalid send result %d\n", (int)result);
@@ -2293,7 +2310,6 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
}
done:
- UnUseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), p);
return retval;
}
@@ -2359,7 +2375,7 @@ accumulate(Eterm acc, Uint size)
* bignum buffer with one extra word to be used if
* the bignum grows in the future.
*/
- Eterm* hp = (Eterm *) erts_alloc(ERTS_ALC_T_TEMP_TERM,
+ Eterm* hp = (Eterm *) erts_alloc(ERTS_ALC_T_SHORT_LIVED_TERM,
(BIG_UINT_HEAP_SIZE+1) *
sizeof(Eterm));
return uint_to_big(size, hp);
@@ -2379,7 +2395,7 @@ accumulate(Eterm acc, Uint size)
* The extra word has been consumed. Grow the
* allocation by one word.
*/
- big = (Eterm *) erts_realloc(ERTS_ALC_T_TEMP_TERM,
+ big = (Eterm *) erts_realloc(ERTS_ALC_T_SHORT_LIVED_TERM,
big_val(acc),
(need_heap+1) * sizeof(Eterm));
acc = make_big(big);
@@ -2408,29 +2424,86 @@ consolidate(Process* p, Eterm acc, Uint size)
while (sz--) {
*hp++ = *big++;
}
- erts_free(ERTS_ALC_T_TEMP_TERM, (void *) big_val(acc));
+ erts_free(ERTS_ALC_T_SHORT_LIVED_TERM, (void *) big_val(acc));
return res;
}
}
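accumulate()/consolidate() keep the running byte count as a plain machine word for as long as it fits and spill it into a heap-allocated bignum only on overflow; the allocator switch from TEMP to SHORT_LIVED lets that accumulator survive the trapping points introduced below. A rough standalone sketch of the promotion test, assuming a GCC/Clang-style checked-add builtin (ERTS itself promotes at the tagged-small limit, not at the full 64-bit limit):

    #include <stdint.h>

    /* Returns 1 and stores a+b in *out when the sum fits in 64 bits;
     * returns 0 when the caller must promote the accumulator to a
     * bignum representation instead. */
    static int add_fits(uint64_t a, uint64_t b, uint64_t *out)
    {
    #if defined(__GNUC__) || defined(__clang__)
        return !__builtin_add_overflow(a, b, out);
    #else
        if (a > UINT64_MAX - b)
            return 0;
        *out = a + b;
        return 1;
    #endif
    }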
+typedef struct {
+ Eterm obj;
+ Uint size;
+ Eterm acc;
+ Eterm input_list;
+ ErtsEStack stack;
+ int is_trap_at_L_iter_list;
+} ErtsIOListSizeContext;
+
+static int iolist_size_ctx_bin_dtor(Binary *context_bin) {
+ ErtsIOListSizeContext* context = ERTS_MAGIC_BIN_DATA(context_bin);
+ DESTROY_SAVED_ESTACK(&context->stack);
+ if (context->acc != THE_NON_VALUE) {
+ erts_free(ERTS_ALC_T_SHORT_LIVED_TERM, (void *) big_val(context->acc));
+ }
+ return 1;
+}
+
BIF_RETTYPE iolist_size_1(BIF_ALIST_1)
{
- Eterm obj, hd;
+ static const Uint ITERATIONS_PER_RED = 64;
+ Eterm input_list, obj, hd;
Eterm* objp;
Uint size = 0;
Uint cur_size;
Uint new_size;
Eterm acc = THE_NON_VALUE;
DECLARE_ESTACK(s);
-
- obj = BIF_ARG_1;
+ Uint max_iterations;
+ Uint iterations_until_trap = max_iterations =
+ ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
+ ErtsIOListSizeContext* context = NULL;
+ Eterm state_mref;
+ int is_trap_at_L_iter_list;
+ ERTS_UNDEF(state_mref, THE_NON_VALUE);
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+#ifdef DEBUG
+ iterations_until_trap = iterations_until_trap / 10;
+#endif
+ input_list = obj = BIF_ARG_1;
+ if (is_internal_magic_ref(obj)) {
+ /* Restore state after a trap */
+ Binary* state_bin;
+ state_mref = obj;
+ state_bin = erts_magic_ref2bin(state_mref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(state_bin) != iolist_size_ctx_bin_dtor) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ context = ERTS_MAGIC_BIN_DATA(state_bin);
+ obj = context->obj;
+ size = context->size;
+ acc = context->acc;
+ input_list = context->input_list;
+ ESTACK_RESTORE(s, &context->stack);
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ erts_set_gc_state(BIF_P, 1);
+ if (context->is_trap_at_L_iter_list) {
+ goto L_iter_list;
+ }
+ }
goto L_again;
while (!ESTACK_ISEMPTY(s)) {
obj = ESTACK_POP(s);
+ if (iterations_until_trap == 0) {
+ is_trap_at_L_iter_list = 0;
+ goto L_save_state_and_trap;
+ }
L_again:
if (is_list(obj)) {
L_iter_list:
+ if (iterations_until_trap == 0) {
+ is_trap_at_L_iter_list = 1;
+ goto L_save_state_and_trap;
+ }
objp = list_val(obj);
hd = CAR(objp);
obj = CDR(objp);
@@ -2452,12 +2525,14 @@ BIF_RETTYPE iolist_size_1(BIF_ALIST_1)
} else if (is_list(hd)) {
ESTACK_PUSH(s, obj);
obj = hd;
+ iterations_until_trap--;
goto L_iter_list;
} else if (is_not_nil(hd)) {
goto L_type_error;
}
/* Tail */
if (is_list(obj)) {
+ iterations_until_trap--;
goto L_iter_list;
} else if (is_binary(obj) && binary_bitsize(obj) == 0) {
cur_size = binary_size(obj);
@@ -2481,14 +2556,55 @@ BIF_RETTYPE iolist_size_1(BIF_ALIST_1)
} else if (is_not_nil(obj)) {
goto L_type_error;
}
+ iterations_until_trap--;
}
DESTROY_ESTACK(s);
+ BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ if (context != NULL) {
+ /* context->acc needs to be reset so that
+ iolist_size_ctx_bin_dtor does not deallocate twice */
+ context->acc = THE_NON_VALUE;
+ }
BIF_RET(consolidate(BIF_P, acc, size));
L_type_error:
DESTROY_ESTACK(s);
- BIF_ERROR(BIF_P, BADARG);
+ if (acc != THE_NON_VALUE) {
+ erts_free(ERTS_ALC_T_SHORT_LIVED_TERM, (void *) big_val(acc));
+ if (context != NULL) {
+ context->acc = THE_NON_VALUE;
+ }
+ }
+ BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ if (context == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ } else {
+ ERTS_BIF_ERROR_TRAPPED1(BIF_P,
+ BADARG,
+ bif_export[BIF_iolist_size_1],
+ input_list);
+ }
+
+ L_save_state_and_trap:
+ if (context == NULL) {
+ Binary *state_bin = erts_create_magic_binary(sizeof(ErtsIOListSizeContext),
+ iolist_size_ctx_bin_dtor);
+ Eterm* hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
+ context = ERTS_MAGIC_BIN_DATA(state_bin);
+ }
+ context->obj = obj;
+ context->size = size;
+ context->acc = acc;
+ context->is_trap_at_L_iter_list = is_trap_at_L_iter_list;
+ context->input_list = input_list;
+ ESTACK_SAVE(s, &context->stack);
+ erts_set_gc_state(BIF_P, 0);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(bif_export[BIF_iolist_size_1], BIF_P, state_mref);
}
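The reworked BIF above is a template for making a linear-time BIF preemptible: work is budgeted at ITERATIONS_PER_RED iterations per remaining reduction; when the budget hits zero, the loop position (obj, size, acc, and the ESTACK) is saved in a magic binary whose destructor releases the off-heap accumulator, GC is disabled so the trap argument stays valid, and the BIF traps to itself with the magic ref. A stripped-down sketch of that save/resume shape in plain C (malloc-style ownership stands in for the magic binary; no Erlang terms involved):

    typedef struct {
        const int *pos, *end;   /* saved loop position */
        long sum;               /* saved accumulator   */
    } SumCtx;

    /* Process at most `budget` elements per call. Returns 1 when done
     * (*result is valid) or 0 when the caller must "trap": keep ctx
     * alive and call again with a fresh budget. */
    static int sum_slice(SumCtx *ctx, long budget, long *result)
    {
        while (ctx->pos < ctx->end) {
            if (budget-- == 0)
                return 0;
            ctx->sum += *ctx->pos++;
        }
        *result = ctx->sum;
        return 1;
    }

A driver would allocate the SumCtx once, loop on sum_slice() until it returns 1, and free the context in one place; that single cleanup point is the role iolist_size_ctx_bin_dtor plays when a process dies mid-computation.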
/**********************************************************************/
@@ -2732,9 +2848,7 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1)
Uint num_chars, num_built, num_eaten;
byte* err_pos;
Eterm res;
-#ifdef DEBUG
int ares;
-#endif
if (is_not_atom(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
@@ -2744,11 +2858,9 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1)
if (ap->len == 0)
BIF_RET(NIL); /* the empty atom */
-#ifdef DEBUG
ares =
-#endif
erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL);
- ASSERT(ares == ERTS_UTF8_OK);
+ ASSERT(ares == ERTS_UTF8_OK); (void)ares;
res = erts_utf8_to_list(BIF_P, num_chars, ap->name, ap->len, ap->len,
&num_built, &num_eaten, NIL);
@@ -4051,10 +4163,12 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
if (is_nil(dep->cid))
goto bad;
- enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation,
+ make_boxed(&etp->header));
ASSERT(enp != erts_this_node);
- etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
etp->header = make_external_pid_header(1);
etp->next = MSO(BIF_P).first;
etp->node = enp;
@@ -4118,10 +4232,11 @@ BIF_RETTYPE list_to_port_1(BIF_ALIST_1)
if (is_nil(dep->cid))
goto bad;
- enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation,
+ make_boxed(&etp->header));
ASSERT(enp != erts_this_node);
- etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
etp->header = make_external_port_header(1);
etp->next = MSO(BIF_P).first;
etp->node = enp;
@@ -4199,7 +4314,7 @@ BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
if(dep == erts_this_dist_entry) {
ErtsMagicBinary *mb;
Uint32 sid;
- if (refn[0] > MAX_REFERENCE) goto bad;
+ if (refn[0] >= MAX_REFERENCE) goto bad;
if (n != ERTS_REF_NUMBERS) goto bad;
sid = erts_get_ref_numbers_thr_id(refn);
if (sid > erts_no_schedulers) goto bad;
@@ -4224,9 +4339,6 @@ BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
if (is_nil(dep->cid))
goto bad;
- enp = erts_find_or_insert_node(dep->sysname, dep->creation);
- ASSERT(enp != erts_this_node);
-
hsz = EXTERNAL_THING_HEAD_SIZE;
#if defined(ARCH_64)
hsz += n/2 + 1;
@@ -4235,6 +4347,11 @@ BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
#endif
etp = (ExternalThing *) HAlloc(BIF_P, hsz);
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation,
+ make_boxed(&etp->header));
+ ASSERT(enp != erts_this_node);
+
etp->header = make_external_ref_header(n/2);
etp->next = BIF_P->off_heap.first;
etp->node = enp;
@@ -4354,21 +4471,21 @@ BIF_RETTYPE erts_internal_group_leader_2(BIF_ALIST_2)
if (is_external_pid(BIF_ARG_2)) {
DistEntry *dep;
int code;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
dep = external_pid_dist_entry(BIF_ARG_2);
ERTS_ASSERT(dep);
if(dep == erts_this_dist_entry)
BIF_ERROR(BIF_P, BADARG);
- code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_PROC_LOCK_MAIN,
- ERTS_DSP_NO_LOCK, 0, 1);
+ code = erts_dsig_prepare(&ctx, dep, BIF_P, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 1, 1);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
BIF_RET(am_true);
case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED:
- code = erts_dsig_send_group_leader(&dsd, BIF_ARG_1, BIF_ARG_2);
+ code = erts_dsig_send_group_leader(&ctx, BIF_ARG_1, BIF_ARG_2);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
BIF_RET(am_true);
@@ -4514,13 +4631,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(old_value);
- } else if (BIF_ARG_1 == am_display_items) {
- int oval = display_items;
- if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
- goto error;
- }
- display_items = n < 32 ? 32 : n;
- BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_debug_flags) {
BIF_RET(am_true);
} else if (BIF_ARG_1 == am_backtrace_depth) {
@@ -5056,6 +5166,12 @@ erts_schedule_bif(Process *proc,
pc = i;
mfa = &exp->info.mfa;
}
+ else if (BeamIsOpCode(*i, op_call_bif_only_e)) {
+ /* Pointer to bif export in i+1 */
+ exp = (Export *) i[1];
+ pc = i;
+ mfa = &exp->info.mfa;
+ }
else if (BeamIsOpCode(*i, op_apply_bif)) {
/* Pointer to bif in i+1, and mfa in i-3 */
pc = c_p->cp;
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 8419244832..db9c258cb7 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -41,7 +41,7 @@
#
-gcbif erlang:abs/1
+ubif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
bif erlang:adler32_combine/3
@@ -66,7 +66,7 @@ bif erlang:exit/2
bif erlang:exit_signal/2
bif erlang:external_size/1
bif erlang:external_size/2
-gcbif erlang:float/1
+ubif erlang:float/1
bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
@@ -84,7 +84,7 @@ bif erlang:phash2/2
ubif erlang:hd/1
bif erlang:integer_to_list/1
bif erlang:is_alive/0
-gcbif erlang:length/1
+ubif erlang:length/1
bif erlang:link/1
bif erlang:list_to_atom/1
bif erlang:list_to_binary/1
@@ -133,10 +133,10 @@ bif erlang:processes/0
bif erlang:put/2
bif erlang:register/2
bif erlang:registered/0
-gcbif erlang:round/1
+ubif erlang:round/1
ubif erlang:self/0
bif erlang:setelement/3
-gcbif erlang:size/1
+ubif erlang:size/1
bif erlang:spawn/3
bif erlang:spawn_link/3
bif erlang:split_binary/2
@@ -146,7 +146,7 @@ bif erlang:term_to_binary/2
bif erlang:throw/1
bif erlang:time/0
ubif erlang:tl/1
-gcbif erlang:trunc/1
+ubif erlang:trunc/1
bif erlang:tuple_to_list/1
bif erlang:universaltime/0
bif erlang:universaltime_to_localtime/1
@@ -160,6 +160,8 @@ bif erlang:dist_ctrl_input_handler/2
bif erlang:dist_ctrl_put_data/2
bif erlang:dist_ctrl_get_data/1
bif erlang:dist_ctrl_get_data_notification/1
+bif erlang:dist_ctrl_get_opt/2
+bif erlang:dist_ctrl_set_opt/3
# Static native functions in erts_internal
bif erts_internal:port_info/1
@@ -435,6 +437,7 @@ bif erts_debug:set_internal_state/2
bif erts_debug:display/1
bif erts_debug:dist_ext_to_term/2
bif erts_debug:instructions/0
+bif erts_debug:interpreter_size/0
bif erts_debug:dirty_cpu/2
bif erts_debug:dirty_io/2
bif erts_debug:dirty/3
@@ -481,8 +484,8 @@ bif erlang:list_to_existing_atom/1
#
ubif erlang:is_bitstring/1
ubif erlang:tuple_size/1
-gcbif erlang:byte_size/1
-gcbif erlang:bit_size/1
+ubif erlang:byte_size/1
+ubif erlang:bit_size/1
bif erlang:list_to_bitstring/1
bif erlang:bitstring_to_list/1
@@ -534,8 +537,8 @@ bif erlang:binary_to_term/2
#
# The searching/splitting/substituting thingies
#
-gcbif erlang:binary_part/2
-gcbif erlang:binary_part/3
+ubif erlang:binary_part/2
+ubif erlang:binary_part/3
bif binary:compile_pattern/1
bif binary:match/2
@@ -623,14 +626,13 @@ bif io:printable_range/0
bif re:inspect/2
ubif erlang:is_map/1
-gcbif erlang:map_size/1
+ubif erlang:map_size/1
bif maps:find/2
bif maps:get/2
bif maps:from_list/1
bif maps:is_key/2
bif maps:keys/1
bif maps:merge/2
-bif maps:new/0
bif maps:put/3
bif maps:remove/2
bif maps:update/3
@@ -672,8 +674,8 @@ bif maps:take/2
# New in 20.0
#
-gcbif erlang:floor/1
-gcbif erlang:ceil/1
+ubif erlang:floor/1
+ubif erlang:ceil/1
bif math:floor/1
bif math:ceil/1
bif math:fmod/2
diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab
index 00854471a9..8e0caa38a3 100644
--- a/erts/emulator/beam/bif_instrs.tab
+++ b/erts/emulator/beam/bif_instrs.tab
@@ -31,13 +31,20 @@
CALL_GUARD_BIF(BF, TmpReg, Dst) {
Eterm result;
+#ifdef DEBUG
+ Eterm* orig_htop = HTOP;
+ Eterm* orig_stop = E;
+#endif
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
+ DEBUG_SWAPOUT;
result = (*$BF)(c_p, $TmpReg, I);
+ DEBUG_SWAPIN;
+ ASSERT(orig_htop == HTOP && orig_stop == E);
ERTS_CHK_MBUF_SZ(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -51,237 +58,259 @@ CALL_GUARD_BIF(BF, TmpReg, Dst) {
}
}
-// Guard BIF in head. On failure, ignore the error and jump
-// to the code for the next clause. We don't support tracing
+// Guard BIF in head. On failure, ignore the error and jump
+// to the code for the next clause. We don't support tracing
// of guard BIFs.
-bif1(Fail, Bif, Src, Dst) {
+i_bif1 := i_bif.fetch0.call;
+i_bif2 := i_bif.fetch1.fetch0.call;
+i_bif3 := i_bif.fetch2.fetch1.fetch0.call;
+
+i_bif.head() {
ErtsBifFunc bf;
- Eterm tmp_reg[1];
+ Eterm tmp_reg[3];
+}
+i_bif.fetch0(Src) {
tmp_reg[0] = $Src;
- bf = (BifFunction) $Bif;
- $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
-
- $FAIL($Fail);
}
-//
-// Guard BIF in body. It can fail like any BIF. No trace support.
-//
+i_bif.fetch1(Src) {
+ tmp_reg[1] = $Src;
+}
-bif1_body(Bif, Src, Dst) {
- ErtsBifFunc bf;
- Eterm tmp_reg[1];
+i_bif.fetch2(Src) {
+ tmp_reg[2] = $Src;
+}
- tmp_reg[0] = $Src;
+i_bif.call(Fail, Bif, Dst) {
bf = (BifFunction) $Bif;
$CALL_GUARD_BIF(bf, tmp_reg, $Dst);
- reg[0] = tmp_reg[0];
- SWAPOUT;
- I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
- goto post_error_handling;
+ $FAIL($Fail);
}
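The `i_bif1 := i_bif.fetch0.call;` lines use beam_makeops' micro-instruction composition: each named instruction is stitched together from the fetch0/fetch1/fetch2 fragments (which fill tmp_reg[]) and one shared call fragment, so a single call body now serves guard BIFs of arity one through three. The effect is similar to fusing handlers in a threaded interpreter; a toy C illustration of the arity-2 fusion (not generated code):

    typedef long Term;
    typedef Term (*GuardBif)(const Term *args);

    /* Fused equivalent of i_bif2 := i_bif.fetch1.fetch0.call */
    static Term i_bif2_fused(Term src0, Term src1, GuardBif bf)
    {
        Term tmp_reg[3];
        tmp_reg[1] = src1;    /* i_bif.fetch1 */
        tmp_reg[0] = src0;    /* i_bif.fetch0 */
        return bf(tmp_reg);   /* i_bif.call (failure handling omitted) */
    }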
//
-// Guard bif in guard with two arguments ('and'/2, 'or'/2, 'xor'/2).
+// Guard BIF in body. It can fail like any BIF. No trace support.
//
-i_bif2(Fail, Bif, Src1, Src2, Dst) {
- Eterm tmp_reg[2];
+i_bif1_body := i_bif_body.fetch0.call;
+i_bif2_body := i_bif_body.fetch1.fetch0.call;
+i_bif3_body := i_bif_body.fetch2.fetch1.fetch0.call;
+
+i_bif_body.head() {
ErtsBifFunc bf;
+ Eterm tmp_reg[3];
+}
- tmp_reg[0] = $Src1;
- tmp_reg[1] = $Src2;
- bf = (ErtsBifFunc) $Bif;
- $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
- $FAIL($Fail);
+i_bif_body.fetch0(Src) {
+ tmp_reg[0] = $Src;
}
-//
-// Guard bif in body with two arguments ('and'/2, 'or'/2, 'xor'/2).
-//
+i_bif_body.fetch1(Src) {
+ tmp_reg[1] = $Src;
+}
-i_bif2_body(Bif, Src1, Src2, Dst) {
- Eterm tmp_reg[2];
- ErtsBifFunc bf;
+i_bif_body.fetch2(Src) {
+ tmp_reg[2] = $Src;
+}
- tmp_reg[0] = $Src1;
- tmp_reg[1] = $Src2;
- bf = (ErtsBifFunc) $Bif;
+i_bif_body.call(Bif, Dst) {
+ bf = (BifFunction) $Bif;
$CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+
reg[0] = tmp_reg[0];
reg[1] = tmp_reg[1];
+ reg[2] = tmp_reg[2];
SWAPOUT;
I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
goto post_error_handling;
}
//
-// Garbage-collecting BIF with one argument in either guard or body.
+// length/1 is the only guard BIF that does not execute in constant
+// time. Here follows special instructions to allow the calculation of
+// the list length to be broken in several chunks to avoid hogging
+// the scheduler for a long time.
//
-i_gc_bif1(Fail, Bif, Src, Live, Dst) {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm result;
- Uint live = (Uint) $Live;
+i_length_setup(Live, Src) {
+ Uint live = $Live;
+ Eterm src = $Src;
- x(live) = $Src;
- bf = (GcBifFunction) $Bif;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, reg, live);
- ERTS_CHK_MBUF_SZ(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (ERTS_LIKELY(is_value(result))) {
- $REFRESH_GEN_DEST();
- $Dst = result;
- $NEXT0();
- }
- if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */
- $JUMP($Fail);
- }
+ reg[live] = src;
+ reg[live+1] = make_small(0);
+ reg[live+2] = src;
- /* Handle error in body. */
- x(0) = x(live);
- I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
- goto post_error_handling;
+ /* This instruction is always followed by i_length */
+ SET_I($NEXT_INSTRUCTION);
+ goto i_length_start__;
+ //| -no_next
}
//
-// Garbage-collecting BIF with two arguments in either guard or body.
+// This instruction can be executed one or more times. When entering
+// this instruction, the X registers have the following contents:
+//
+// reg[live+0] The remainder of the list.
+// reg[live+1] The length so far (tagged integer).
+// reg[live+2] The original list. Only used if an error is generated
+// (if the final tail of the list is not []).
//
-i_gc_bif2(Fail, Bif, Live, Src1, Src2, Dst) {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm result;
- Uint live = (Uint) $Live;
+i_length := i_length.start.execute;
- /*
- * XXX This calling convention does not make sense. 'live'
- * should point out the first argument, not the second
- * (i.e. 'live' should not be incremented below).
- */
- x(live) = $Src1;
- x(live+1) = $Src2;
- live++;
+i_length.start() {
+ i_length_start__:
+ ;
+}
+
+i_length.execute(Fail, Live, Dst) {
+ Eterm result;
+ Uint live;
- bf = (GcBifFunction) $Bif;
ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
- SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, reg, live);
+ DEBUG_SWAPOUT;
+
+ live = $Live;
+ result = erts_trapping_length_1(c_p, reg+live);
+
+ DEBUG_SWAPIN;
ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (ERTS_LIKELY(is_value(result))) {
+ /* Successful calculation of the list length. */
$REFRESH_GEN_DEST();
$Dst = result;
$NEXT0();
+ } else if (c_p->freason == TRAP) {
+ /*
+ * Good so far, but there is more work to do. Yield.
+ */
+ $SET_CP_I_ABS(I);
+ SWAPOUT;
+ c_p->arity = live + 3;
+ c_p->current = NULL;
+ goto context_switch3;
+ } else {
+ /* Error. */
+ $BIF_ERROR_ARITY_1($Fail, BIF_length_1, reg[live+2]);
}
-
- if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */
- $JUMP($Fail);
- }
-
- /* Handle error in body. */
- live--;
- x(0) = x(live);
- x(1) = x(live+1);
- I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
- goto post_error_handling;
+ //| -no_next
}
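Note how the yielded state lives entirely in X registers: reg[live] holds the remaining tail, reg[live+1] the count so far, reg[live+2] the original list, and `c_p->arity = live + 3` tells the context switcher to preserve exactly those registers. The slice-by-slice traversal itself (erts_trapping_length_1 is not shown in this diff) plausibly has the shape of this C sketch over a cons-cell list:

    struct cons { struct cons *cdr; };

    typedef struct {
        struct cons *rest;     /* reg[live+0]: remainder of the list */
        unsigned long count;   /* reg[live+1]: length so far         */
    } LenState;

    /* Walk at most `budget` cells. Returns 1 at the end of the list
     * (*len valid) or 0 when the scheduler should run something else
     * before the walk is resumed. */
    static int length_slice(LenState *st, unsigned long budget,
                            unsigned long *len)
    {
        while (st->rest != NULL) {
            if (budget-- == 0)
                return 0;
            st->count++;
            st->rest = st->rest->cdr;
        }
        *len = st->count;
        return 1;
    }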
//
-// Garbage-collecting BIF with three arguments in either guard or body.
+// Call a BIF, store the result in x(0) and transfer control to the
+// next instruction.
//
-
-i_gc_bif3(Fail, Bif, Live, Src2, Src3, Dst) {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
+call_bif(Exp) {
+ ErtsBifFunc bf;
Eterm result;
- Uint live = (Uint) $Live;
+ ErlHeapFragment *live_hf_end;
+ Export *export = (Export*) $Exp;
- /*
- * XXX This calling convention does not make sense. 'live'
- * should point out the first argument, not the third
- * (i.e. 'live' should not be incremented below).
- */
- x(live) = x(SCRATCH_X_REG);
- x(live+1) = $Src2;
- x(live+2) = $Src3;
- live += 2;
+ if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
+ /*
+ * If we have run out of reductions, do a context
+ * switch before calling the BIF.
+ */
+ c_p->arity = GET_BIF_ARITY(export);
+ c_p->current = &export->info.mfa;
+ goto context_switch3;
+ }
+
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(export),
+ GET_BIF_ADDRESS(export));
- bf = (GcBifFunction) $Bif;
+ bf = GET_BIF_ADDRESS(export);
+
+ PRE_BIF_SWAPOUT(c_p);
ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ c_p->fcalls = FCALLS - 1;
+ if (FCALLS <= 0) {
+ save_calls(c_p, export);
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ live_hf_end = c_p->mbuf;
ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, reg, live);
+ result = (*bf)(c_p, reg, I);
ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_HOLE_CHECK(c_p);
ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ if (ERTS_IS_GC_DESIRED(c_p)) {
+ Uint arity = GET_BIF_ARITY(export);
+ result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result,
+ reg, arity);
+ E = c_p->stop;
+ }
PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
+ HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
ERTS_DBG_CHK_REDS(c_p, FCALLS);
+
+ /*
+ * We have to update the cache if msacc is enabled, to make
+ * sure no bookkeeping is done after it has been disabled. We
+ * don't always do this as it is quite expensive.
+ */
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
+ ERTS_MSACC_UPDATE_CACHE_X();
+ }
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
if (ERTS_LIKELY(is_value(result))) {
- $REFRESH_GEN_DEST();
- $Dst = result;
+ r(0) = result;
+ CHECK_TERM(r(0));
$NEXT0();
+ } else if (c_p->freason == TRAP) {
+ /*
+ * Set the continuation pointer to return to next
+ * instruction after the trap (either by a return from
+ * erlang code or by nif_bif.epilogue() when the BIF
+ * is done).
+ */
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ SET_I(c_p->i);
+ SWAPIN;
+ Dispatch();
}
- /* Handle error in guard. */
- if (ERTS_LIKELY($Fail != 0)) {
- $JUMP($Fail);
- }
-
- /* Handle error in body. */
- live -= 2;
- x(0) = x(live);
- x(1) = x(live+1);
- x(2) = x(live+2);
- I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
+ /*
+ * Error handling. SWAPOUT is not needed because it was done above.
+ */
+ ASSERT(c_p->stop == E);
+ I = handle_error(c_p, I, reg, &export->info.mfa);
goto post_error_handling;
+ //| -no_next
}
//
-// The most general BIF call. The BIF may build any amount of data
-// on the heap. The result is always returned in r(0).
+// Call a BIF tail-recursively, storing the result in x(0) and doing
+// a return to the continuation pointer (c_p->cp).
//
-call_bif(Exp) {
+
+call_bif_only(Exp) {
ErtsBifFunc bf;
Eterm result;
ErlHeapFragment *live_hf_end;
Export *export = (Export*) $Exp;
if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the bif */
+ /*
+ * If we have run out of reductions, do a context
+ * switch before calling the BIF.
+ */
c_p->arity = GET_BIF_ARITY(export);
c_p->current = &export->info.mfa;
goto context_switch3;
@@ -318,19 +347,28 @@ call_bif(Exp) {
HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
ERTS_DBG_CHK_REDS(c_p, FCALLS);
- /* We have to update the cache if we are enabled in order
- to make sure no book keeping is done after we disabled
- msacc. We don't always do this as it is quite expensive. */
+
+ /*
+ * We have to update the cache if msacc is enabled, to make
+ * sure no bookkeeping is done after it has been disabled. We
+ * don't always do this as it is quite expensive.
+ */
if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
ERTS_MSACC_UPDATE_CACHE_X();
}
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
if (ERTS_LIKELY(is_value(result))) {
+ /*
+ * Success. Store the result and return to the caller.
+ */
r(0) = result;
CHECK_TERM(r(0));
- $NEXT0();
+ $return();
} else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+2);
+ /*
+ * Dispatch to a trap. When the trap is done, a jump
+ * to the continuation pointer (c_p->cp) will be made.
+ */
SET_I(c_p->i);
SWAPIN;
Dispatch();
@@ -342,6 +380,7 @@ call_bif(Exp) {
ASSERT(c_p->stop == E);
I = handle_error(c_p, I, reg, &export->info.mfa);
goto post_error_handling;
+ //| -no_next
}
//
@@ -374,7 +413,7 @@ send() {
r(0) = result;
CHECK_TERM(r(0));
} else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+1);
+ SET_CP(c_p, $NEXT_INSTRUCTION);
SET_I(c_p->i);
SWAPIN;
Dispatch();
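For context on the two variants: call_bif falls through to the next instruction after a successful BIF call, while the new call_bif_only is emitted when the call is in tail position, returning the result directly to the caller's continuation (c_p->cp) and therefore skipping SET_CP on a trap. The distinction, reduced to plain C (the emulator's actual dispatch is threaded code, not function calls):

    typedef int (*Instr)(int);

    /* call_bif shape: run the BIF, then execute the next instruction. */
    static int call_bif_shape(int (*bif)(int), int arg, Instr next)
    {
        return next(bif(arg));
    }

    /* call_bif_only shape: the result goes straight back to whoever
     * called us; there is no next instruction to run. */
    static int call_bif_only_shape(int (*bif)(int), int arg)
    {
        return bif(arg);    /* tail position: reuse the caller's return */
    }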
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 274482a0d2..ad19cce395 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -42,7 +42,7 @@ typedef Uint16 ErtsHalfDigit;
#undef BIG_HAVE_DOUBLE_DIGIT
typedef Uint32 ErtsHalfDigit;
#else
-#error "can not determine machine size"
+#error "cannot determine machine size"
#endif
typedef Uint dsize_t; /* Vector size type */
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 92009c2345..6379e4e04d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -39,6 +39,7 @@
#include "erl_hl_timer.h"
#include "erl_thr_progress.h"
#include "erl_proc_sig_queue.h"
+#include "dist.h"
/* Forward declarations -- should really appear somewhere else */
static void process_killer(void);
@@ -74,6 +75,7 @@ port_info(fmtfn_t to, void *to_arg)
void
process_info(fmtfn_t to, void *to_arg)
{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
int i, max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
@@ -81,11 +83,36 @@ process_info(fmtfn_t to, void *to_arg)
/* Do not include processes with no heap,
* they have most likely just been created and have invalid data
*/
- if (!ERTS_PROC_IS_EXITING(p) && p->heap != NULL)
- print_process_info(to, to_arg, p, 0);
+ if (p->heap != NULL) {
+ ErtsProcLocks locks = ((esdp && (p == esdp->current_process ||
+ p == esdp->free_process))
+ ? ERTS_PROC_LOCK_MAIN : 0);
+ print_process_info(to, to_arg, p, locks);
+ }
}
}
+ /* Look for FREE processes in the run-queues and dist entries.
+ These have been removed from the ptab but we still want them
+ in the crash dump for debugging. */
+
+ /* First loop through all run-queues */
+ for (i = 0; i < erts_no_schedulers + ERTS_NUM_DIRTY_RUNQS; i++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(i);
+ int j;
+ for (j = 0; j < ERTS_NO_PROC_PRIO_QUEUES; j++) {
+ Process *p = rq->procs.prio[j].first;
+ while (p) {
+ if (ERTS_PSFLG_FREE & erts_atomic32_read_acqb(&p->state))
+ print_process_info(to, to_arg, p, 0);
+ p = p->next;
+ }
+ }
+ }
+
+ /* Then check all dist entries */
+ erts_dist_print_procs_suspended_on_de(to, to_arg);
+
port_info(to, to_arg);
}
@@ -108,6 +135,7 @@ process_killer(void)
erts_exit(0, "");
switch(j) {
case 'k':
+ ASSERT(erts_init_process_id != ERTS_INVALID_PID);
/* Send a 'kill' exit signal from init process */
erts_proc_sig_send_exit(NULL, erts_init_process_id,
rp->common.id, am_kill, NIL,
@@ -128,7 +156,7 @@ typedef struct {
void *to_arg;
} PrintMonitorContext;
-static void doit_print_link(ErtsLink *lnk, void *vpcontext)
+static int doit_print_link(ErtsLink *lnk, void *vpcontext, Sint reds)
{
PrintMonitorContext *pcontext = vpcontext;
fmtfn_t to = pcontext->to;
@@ -140,10 +168,11 @@ static void doit_print_link(ErtsLink *lnk, void *vpcontext)
} else {
erts_print(to, to_arg, ", %T", lnk->other.item);
}
+ return 1;
}
-static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
+static int doit_print_monitor(ErtsMonitor *mon, void *vpcontext, Sint reds)
{
ErtsMonitorData *mdp;
PrintMonitorContext *pcontext = vpcontext;
@@ -195,14 +224,16 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
/* ignore other monitors... */
break;
}
+ return 1;
}
-
+
/* Display info about an individual Erlang process */
void
print_process_info(fmtfn_t to, void *to_arg, Process *p, ErtsProcLocks orig_locks)
{
int garbing = 0;
int running = 0;
+ int exiting = 0;
Sint len;
struct saved_calls *scb;
erts_aint32_t state;
@@ -223,9 +254,12 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p, ErtsProcLocks orig_lock
| ERTS_PSFLG_DIRTY_RUNNING))
running = 1;
+ if (state & ERTS_PSFLG_EXITING)
+ exiting = 1;
+
if (!(locks & ERTS_PROC_LOCK_MAIN)) {
locks |= ERTS_PROC_LOCK_MAIN;
- if (ERTS_IS_CRASH_DUMPING && running) {
+ if (ERTS_IS_CRASH_DUMPING) {
if (erts_proc_trylock(p, locks)) {
/* crash dumping and main lock taken, this probably means that
the process is doing a GC on a dirty-scheduler... so we cannot
@@ -243,7 +277,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p, ErtsProcLocks orig_lock
* If the process is registered as a global process, display the
* registered name
*/
- if (p->common.u.alive.reg)
+ if (!ERTS_PROC_IS_EXITING(p) && p->common.u.alive.reg)
erts_print(to, to_arg, "Name: %T\n", p->common.u.alive.reg->name);
/*
@@ -329,7 +363,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p, ErtsProcLocks orig_lock
}
/* display the links only if there are any */
- if (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p) || ERTS_P_LT_MONITORS(p)) {
+ if (!exiting && (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p) || ERTS_P_LT_MONITORS(p))) {
PrintMonitorContext context = {1, to, to_arg};
erts_print(to, to_arg, "Link list: [");
erts_link_tree_foreach(ERTS_P_LINKS(p), doit_print_link, &context);
@@ -361,8 +395,12 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p, ErtsProcLocks orig_lock
erts_print(to, to_arg, "OldBinVHeap: %b64u\n", BIN_OLD_VHEAP(p));
erts_print(to, to_arg, "BinVHeap unused: %b64u\n",
BIN_VHEAP_SZ(p) - p->off_heap.overhead);
- erts_print(to, to_arg, "OldBinVHeap unused: %b64u\n",
- BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p));
+ if (BIN_OLD_VHEAP_SZ(p) >= BIN_OLD_VHEAP(p)) {
+ erts_print(to, to_arg, "OldBinVHeap unused: %b64u\n",
+ BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p));
+ } else {
+ erts_print(to, to_arg, "OldBinVHeap unused: overflow\n");
+ }
erts_print(to, to_arg, "Memory: %beu\n", erts_process_memory(p, !0));
if (garbing) {
diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab
index 61eb02a7a2..bd1ad91e45 100644
--- a/erts/emulator/beam/bs_instrs.tab
+++ b/erts/emulator/beam/bs_instrs.tab
@@ -21,12 +21,57 @@
%if ARCH_64
BS_SAFE_MUL(A, B, Fail, Dst) {
- Uint64 res = ($A) * ($B);
- if (res / $B != $A) {
+ Uint a = $A;
+ Uint b = $B;
+ Uint res;
+#ifdef HAVE_OVERFLOW_CHECK_BUILTINS
+ if (__builtin_mul_overflow(a, b, &res)) {
+ $Fail;
+ }
+#else
+ res = a * b;
+ if (res / b != a) {
$Fail;
}
+#endif
$Dst = res;
}
+
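On 64-bit builds, BS_SAFE_MUL now prefers the compiler's checked multiply over the old multiply-then-divide-back test, avoiding a hardware division on the hot path; HAVE_OVERFLOW_CHECK_BUILTINS is the configure-time guard for that builtin. A standalone equivalent, assuming a GCC- or Clang-compatible compiler:

    #include <stdint.h>

    /* Returns 1 and stores a*b in *res when the product fits in
     * 64 bits, 0 on overflow. */
    static int safe_mul(uint64_t a, uint64_t b, uint64_t *res)
    {
    #if defined(__GNUC__) || defined(__clang__)
        return !__builtin_mul_overflow(a, b, res);
    #else
        *res = a * b;                     /* unsigned wrap is defined */
        return b == 0 || *res / b == a;
    #endif
    }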
+BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ if (is_small($Bits)) {
+ Uint uint_size;
+ Sint signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ $BS_SAFE_MUL(uint_size, $Unit, $Fail, $Dst);
+ } else {
+ /*
+ * On a 64-bit architecture, the size of any binary
+ * that would fit in memory fits in a small.
+ */
+ $Fail;
+ }
+}
+
+BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ if (is_small($Bits)) {
+ Uint uint_size;
+ Sint signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ $Dst = uint_size * $Unit;
+ } else {
+ /*
+ * On a 64-bit architecture, the size of any binary
+ * that would fit in memory fits in a small.
+ */
+ $Fail;
+ }
+}
%else
BS_SAFE_MUL(A, B, Fail, Dst) {
Uint64 res = (Uint64)($A) * (Uint64)($B);
@@ -35,7 +80,6 @@ BS_SAFE_MUL(A, B, Fail, Dst) {
}
$Dst = res;
}
-%endif
BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) {
Sint signed_size;
@@ -76,11 +120,12 @@ BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) {
}
$Dst = uint_size * $Unit;
}
+%endif
TEST_BIN_VHEAP(VNh, Nh, Live) {
Uint need = $Nh;
if (E - HTOP < need || MSO(c_p).overhead + $VNh >= BIN_VHEAP_SZ(c_p)) {
- SWAPOUT;
+ $GC_SWAPOUT();
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -90,32 +135,52 @@ TEST_BIN_VHEAP(VNh, Nh, Live) {
HEAP_SPACE_VERIFIED(need);
}
-i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) {
+i_bs_get_binary_all2 := i_bs_get_binary_all2.fetch.execute;
+
+i_bs_get_binary_all2.head() {
+ Eterm context;
+}
+
+i_bs_get_binary_all2.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_binary_all2.execute(Fail, Live, Unit, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
- $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context);
+ _mb = ms_matchbuffer(context);
if (((_mb->size - _mb->offset) % $Unit) == 0) {
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_all_2(c_p, _mb);
LIGHT_SWAPIN;
HEAP_SPACE_VERIFIED(0);
ASSERT(is_value(_result));
+ $REFRESH_GEN_DEST();
$Dst = _result;
} else {
HEAP_SPACE_VERIFIED(0);
$FAIL($Fail);
}
}
+i_bs_get_binary2 := i_bs_get_binary2.fetch.execute;
-i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) {
+i_bs_get_binary2.head() {
+ Eterm context;
+}
+
+i_bs_get_binary2.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_binary2.execute(Fail, Live, Sz, Flags, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
Uint _size;
$BS_GET_FIELD_SIZE($Sz, (($Flags) >> 3), $FAIL($Fail), _size);
- $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context);
+ _mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_2(c_p, _size, $Flags, _mb);
LIGHT_SWAPIN;
@@ -123,15 +188,27 @@ i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) {
if (is_non_value(_result)) {
$FAIL($Fail);
} else {
+ $REFRESH_GEN_DEST();
$Dst = _result;
}
}
-i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) {
+i_bs_get_binary_imm2 := i_bs_get_binary_imm2.fetch.execute;
+
+i_bs_get_binary_imm2.head() {
+ Eterm context;
+}
+
+i_bs_get_binary_imm2.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_binary_imm2.execute(Fail, Live, Sz, Flags, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
- $GC_TEST(0, heap_bin_size(ERL_ONHEAP_BIN_LIMIT), $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(heap_bin_size(ERL_ONHEAP_BIN_LIMIT),
+ $Live, context);
+ _mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_binary_2(c_p, $Sz, $Flags, _mb);
LIGHT_SWAPIN;
@@ -139,11 +216,21 @@ i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) {
if (is_non_value(_result)) {
$FAIL($Fail);
} else {
+ $REFRESH_GEN_DEST();
$Dst = _result;
}
}
+i_bs_get_float2 := i_bs_get_float2.fetch.execute;
-i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
+i_bs_get_float2.head() {
+ Eterm context;
+}
+
+i_bs_get_float2.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_float2.execute(Fail, Live, Sz, Flags, Dst) {
ErlBinMatchBuffer *_mb;
Eterm _result;
Sint _size;
@@ -152,8 +239,8 @@ i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
$FAIL($Fail);
}
_size *= (($Flags) >> 3);
- $GC_TEST(0, FLOAT_SIZE_OBJECT, $Live);
- _mb = ms_matchbuffer($Ms);
+ $GC_TEST_PRESERVE(FLOAT_SIZE_OBJECT, $Live, context);
+ _mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
_result = erts_bs_get_float_2(c_p, _size, ($Flags), _mb);
LIGHT_SWAPIN;
@@ -161,17 +248,29 @@ i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
if (is_non_value(_result)) {
$FAIL($Fail);
} else {
+ $REFRESH_GEN_DEST();
$Dst = _result;
}
}
-i_bs_skip_bits2(Fail, Ms, Bits, Unit) {
+i_bs_skip_bits2 := i_bs_skip_bits2.fetch.execute;
+
+i_bs_skip_bits2.head() {
+ Eterm context, bits;
+}
+
+i_bs_skip_bits2.fetch(Ctx, Bits) {
+ context = $Ctx;
+ bits = $Bits;
+}
+
+i_bs_skip_bits2.execute(Fail, Unit) {
ErlBinMatchBuffer *_mb;
size_t new_offset;
Uint _size;
- _mb = ms_matchbuffer($Ms);
- $BS_GET_FIELD_SIZE($Bits, $Unit, $FAIL($Fail), _size);
+ _mb = ms_matchbuffer(context);
+ $BS_GET_FIELD_SIZE(bits, $Unit, $FAIL($Fail), _size);
new_offset = _mb->offset + _size;
if (new_offset <= _mb->size) {
_mb->offset = new_offset;
@@ -180,16 +279,6 @@ i_bs_skip_bits2(Fail, Ms, Bits, Unit) {
}
}
-i_bs_skip_bits_all2(Fail, Ms, Unit) {
- ErlBinMatchBuffer *_mb;
- _mb = ms_matchbuffer($Ms);
- if (((_mb->size - _mb->offset) % $Unit) == 0) {
- _mb->offset = _mb->size;
- } else {
- $FAIL($Fail);
- }
-}
-
i_bs_skip_bits_imm2(Fail, Ms, Bits) {
ErlBinMatchBuffer *_mb;
size_t new_offset;
@@ -203,15 +292,25 @@ i_bs_skip_bits_imm2(Fail, Ms, Bits) {
}
i_new_bs_put_binary(Fail, Sz, Flags, Src) {
+ Eterm sz = $Sz;
Sint _size;
- $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ $BS_GET_UNCHECKED_FIELD_SIZE(sz, (($Flags) >> 3), $BADARG($Fail), _size);
if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), _size))) {
$BADARG($Fail);
}
}
+i_new_bs_put_binary_all := i_new_bs_put_binary_all.fetch.execute;
-i_new_bs_put_binary_all(Fail, Src, Unit) {
- if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(($Src), ($Unit)))) {
+i_new_bs_put_binary_all.head() {
+ Eterm src;
+}
+
+i_new_bs_put_binary_all.fetch(Src) {
+ src = $Src;
+}
+
+i_new_bs_put_binary_all.execute(Fail, Unit) {
+ if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(src, ($Unit)))) {
$BADARG($Fail);
}
}
@@ -223,9 +322,11 @@ i_new_bs_put_binary_imm(Fail, Sz, Src) {
}
i_new_bs_put_float(Fail, Sz, Flags, Src) {
+ Eterm sz = $Sz;
+ Eterm flags = $Flags;
Sint _size;
- $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
- if (!erts_new_bs_put_float(c_p, ($Src), _size, ($Flags))) {
+ $BS_GET_UNCHECKED_FIELD_SIZE(sz, (flags >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_float(c_p, ($Src), _size, flags)) {
$BADARG($Fail);
}
}
@@ -237,15 +338,27 @@ i_new_bs_put_float_imm(Fail, Sz, Flags, Src) {
}
i_new_bs_put_integer(Fail, Sz, Flags, Src) {
- Sint _size;
- $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, ($Flags)))) {
- $BADARG($Fail);
- }
+ Eterm sz = $Sz;
+ Eterm flags = $Flags;
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE(sz, (flags >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, flags))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_integer_imm := i_new_bs_put_integer_imm.fetch.execute;
+
+i_new_bs_put_integer_imm.head() {
+ Eterm src;
}
-i_new_bs_put_integer_imm(Fail, Sz, Flags, Src) {
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), ($Sz), ($Flags)))) {
+i_new_bs_put_integer_imm.fetch(Src) {
+ src = $Src;
+}
+
+i_new_bs_put_integer_imm.execute(Fail, Sz, Flags) {
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(src, ($Sz), ($Flags)))) {
$BADARG($Fail);
}
}
@@ -724,26 +837,34 @@ bs_start_match.execute(Fail, Live, Slots, Dst) {
$FAIL($Fail);
}
header = *boxed_val(context);
- slots = $Slots;
+
+ /* Reserve a slot for the start position. */
+ slots = $Slots + 1;
live = $Live;
+
if (header_is_bin_matchstate(header)) {
ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context);
Uint actual_slots = HEADER_NUM_SLOTS(header);
+
+ /* We're not compatible with contexts created by bs_start_match3. */
+ ASSERT(actual_slots >= 1);
+
ms->save_offset[0] = ms->mb.offset;
- if (actual_slots < slots) {
- ErlBinMatchState* dst;
+ if (ERTS_UNLIKELY(actual_slots < slots)) {
+ ErlBinMatchState* expanded;
Uint live = $Live;
Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
-
$GC_TEST_PRESERVE(wordsneeded, live, context);
ms = (ErlBinMatchState *) boxed_val(context);
- dst = (ErlBinMatchState *) HTOP;
- *dst = *ms;
+ expanded = (ErlBinMatchState *) HTOP;
+ *expanded = *ms;
*HTOP = HEADER_BIN_MATCHSTATE(slots);
HTOP += wordsneeded;
HEAP_SPACE_VERIFIED(0);
- $Dst = make_matchstate(dst);
+ context = make_matchstate(expanded);
+ $REFRESH_GEN_DEST();
}
+ $Dst = context;
} else if (is_binary_header(header)) {
Eterm result;
Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
@@ -758,6 +879,7 @@ bs_start_match.execute(Fail, Live, Slots, Dst) {
if (is_non_value(result)) {
$FAIL($Fail);
}
+ $REFRESH_GEN_DEST();
$Dst = result;
} else {
$FAIL($Fail);
@@ -796,9 +918,19 @@ bs_test_unit8(Fail, Ctx) {
}
}
-i_bs_get_integer_8(Ctx, Fail, Dst) {
+i_bs_get_integer_8 := i_bs_get_integer_8.fetch.execute;
+
+i_bs_get_integer_8.head() {
+ Eterm context;
+}
+
+i_bs_get_integer_8.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer_8.execute(Fail, Dst) {
Eterm _result;
- ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* _mb = ms_matchbuffer(context);
if (_mb->size - _mb->offset < 8) {
$FAIL($Fail);
@@ -812,9 +944,19 @@ i_bs_get_integer_8(Ctx, Fail, Dst) {
$Dst = _result;
}
-i_bs_get_integer_16(Ctx, Fail, Dst) {
+i_bs_get_integer_16 := i_bs_get_integer_16.fetch.execute;
+
+i_bs_get_integer_16.head() {
+ Eterm context;
+}
+
+i_bs_get_integer_16.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer_16.execute(Fail, Dst) {
Eterm _result;
- ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* _mb = ms_matchbuffer(context);
if (_mb->size - _mb->offset < 16) {
$FAIL($Fail);
@@ -829,9 +971,19 @@ i_bs_get_integer_16(Ctx, Fail, Dst) {
}
%if ARCH_64
-i_bs_get_integer_32(Ctx, Fail, Dst) {
+i_bs_get_integer_32 := i_bs_get_integer_32.fetch.execute;
+
+i_bs_get_integer_32.head() {
+ Eterm context;
+}
+
+i_bs_get_integer_32.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer_32.execute(Fail, Dst) {
Uint32 _integer;
- ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* _mb = ms_matchbuffer(context);
if (_mb->size - _mb->offset < 32) {
$FAIL($Fail);
@@ -881,15 +1033,23 @@ bs_get_integer.execute(Fail, Flags, Dst) {
$Dst = result;
}
-i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
+i_bs_get_integer := i_bs_get_integer.fetch.execute;
+
+i_bs_get_integer.head() {
+ Eterm context;
+}
+
+i_bs_get_integer.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_integer.execute(Fail, Live, FlagsAndUnit, Sz, Dst) {
Uint flags;
Uint size;
- Eterm ms;
ErlBinMatchBuffer* mb;
Eterm result;
flags = $FlagsAndUnit;
- ms = $Ms;
$BS_GET_FIELD_SIZE($Sz, (flags >> 3), $FAIL($Fail), size);
if (size >= SMALL_BITS) {
Uint wordsneeded;
@@ -900,14 +1060,15 @@ i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
* Remember to re-acquire the matchbuffer after gc.
*/
- mb = ms_matchbuffer(ms);
+ mb = ms_matchbuffer(context);
if (mb->size - mb->offset < size) {
$FAIL($Fail);
}
wordsneeded = 1+WSIZE(NBYTES((Uint) size));
- $GC_TEST_PRESERVE(wordsneeded, $Live, ms);
+ $GC_TEST_PRESERVE(wordsneeded, $Live, context);
+ $REFRESH_GEN_DEST();
}
- mb = ms_matchbuffer(ms);
+ mb = ms_matchbuffer(context);
LIGHT_SWAPOUT;
result = erts_bs_get_integer_2(c_p, size, flags, mb);
LIGHT_SWAPIN;
@@ -918,9 +1079,19 @@ i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
$Dst = result;
}
-i_bs_get_utf8(Ctx, Fail, Dst) {
+i_bs_get_utf8 := i_bs_get_utf8.fetch.execute;
+
+i_bs_get_utf8.head() {
+ Eterm context;
+}
+
+i_bs_get_utf8.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_utf8.execute(Fail, Dst) {
Eterm result;
- ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+ ErlBinMatchBuffer* mb = ms_matchbuffer(context);
if (mb->size - mb->offset < 8) {
$FAIL($Fail);
@@ -939,21 +1110,32 @@ i_bs_get_utf8(Ctx, Fail, Dst) {
if (is_non_value(result)) {
$FAIL($Fail);
}
+ $REFRESH_GEN_DEST();
$Dst = result;
}
-i_bs_get_utf16(Ctx, Fail, Flags, Dst) {
- ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+i_bs_get_utf16 := i_bs_get_utf16.fetch.execute;
+
+i_bs_get_utf16.head() {
+ Eterm context;
+}
+
+i_bs_get_utf16.fetch(Ctx) {
+ context = $Ctx;
+}
+
+i_bs_get_utf16.execute(Fail, Flags, Dst) {
+ ErlBinMatchBuffer* mb = ms_matchbuffer(context);
Eterm result = erts_bs_get_utf16(mb, $Flags);
if (is_non_value(result)) {
$FAIL($Fail);
}
+ $REFRESH_GEN_DEST();
$Dst = result;
}
bs_context_to_binary := ctx_to_bin.fetch.execute;
-i_bs_get_binary_all_reuse := ctx_to_bin.fetch_bin.execute;
ctx_to_bin.head() {
Eterm context;
@@ -976,16 +1158,6 @@ ctx_to_bin.fetch(Src) {
}
}
-ctx_to_bin.fetch_bin(Src, Fail, Unit) {
- context = $Src;
- mb = ms_matchbuffer(context);
- size = mb->size - mb->offset;
- if (size % $Unit != 0) {
- $FAIL($Fail);
- }
- offs = mb->offset;
-}
-
ctx_to_bin.execute() {
Uint hole_size;
Uint orig = mb->orig;
@@ -1029,10 +1201,337 @@ i_bs_match_string(Ctx, Fail, Bits, Ptr) {
i_bs_save2(Src, Slot) {
ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src);
+ ASSERT(HEADER_NUM_SLOTS(_ms->thing_word) > $Slot);
_ms->save_offset[$Slot] = _ms->mb.offset;
}
i_bs_restore2(Src, Slot) {
ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src);
+ ASSERT(HEADER_NUM_SLOTS(_ms->thing_word) > $Slot);
_ms->mb.offset = _ms->save_offset[$Slot];
}
+
+bs_get_tail := bs_get_tail.fetch.execute;
+
+bs_get_tail.head() {
+ Eterm context;
+}
+
+bs_get_tail.fetch(Src) {
+ context = $Src;
+}
+
+bs_get_tail.execute(Dst, Live) {
+ ErlBinMatchBuffer* mb;
+ Uint size, offs;
+ ErlSubBin* sb;
+
+ ASSERT(header_is_bin_matchstate(*boxed_val(context)));
+
+ $GC_TEST_PRESERVE(ERL_SUB_BIN_SIZE, $Live, context);
+
+ mb = ms_matchbuffer(context);
+
+ offs = mb->offset;
+ size = mb->size - offs;
+
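+    /* Build a sub-binary referring to the unmatched tail of the source
+     * binary; size and offset are split into byte and bit parts below. */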
+ sb = (ErlSubBin *) HTOP;
+ HTOP += ERL_SUB_BIN_SIZE;
+
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(size);
+ sb->bitsize = BIT_OFFSET(size);
+ sb->offs = BYTE_OFFSET(offs);
+ sb->bitoffs = BIT_OFFSET(offs);
+ sb->is_writable = 0;
+ sb->orig = mb->orig;
+
+ $REFRESH_GEN_DEST();
+ $Dst = make_binary(sb);
+}
+
+
+%if ARCH_64
+
+i_bs_start_match3_gp := i_bs_start_match3_gp.fetch.execute;
+
+i_bs_start_match3_gp.head() {
+ Eterm context;
+}
+
+i_bs_start_match3_gp.fetch(Src) {
+ context = $Src;
+}
+
+i_bs_start_match3_gp.execute(Live, Fail, Dst, Pos) {
+ Eterm header;
+ Uint position, live;
+
+ live = $Live;
+
+ if (!is_boxed(context)) {
+ $FAIL($Fail);
+ }
+
+ header = *boxed_val(context);
+
+ if (header_is_bin_matchstate(header)) {
+ ErlBinMatchBuffer *mb;
+
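+        /* The source is already a match context created by bs_start_match3;
+         * reuse it as-is and just read out the current bit offset. */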
+ ASSERT(HEADER_NUM_SLOTS(header) == 0);
+
+ mb = ms_matchbuffer(context);
+ position = mb->offset;
+
+ $Dst = context;
+ } else if (is_binary_header(header)) {
+ ErlBinMatchState *ms;
+
+ $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(0), live, context);
+ HEAP_TOP(c_p) = HTOP;
+#ifdef DEBUG
+ c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
+#endif
+ ms = erts_bs_start_match_3(c_p, context);
+ HTOP = HEAP_TOP(c_p);
+ HEAP_SPACE_VERIFIED(0);
+
+ if (ms == NULL) {
+ $FAIL($Fail);
+ }
+
+ $REFRESH_GEN_DEST();
+ $Dst = make_matchstate(ms);
+ position = ms->mb.offset;
+ } else {
+ $FAIL($Fail);
+ }
+
+ ASSERT(IS_USMALL(0, position));
+ $Pos = make_small(position);
+}
+
+i_bs_start_match3 := i_bs_start_match3.fetch.execute;
+
+i_bs_start_match3.head() {
+ Eterm context;
+}
+
+i_bs_start_match3.fetch(Src) {
+ context = $Src;
+}
+
+i_bs_start_match3.execute(Live, Fail, Dst) {
+ Eterm header;
+ Uint live;
+
+ live = $Live;
+
+ if (!is_boxed(context)) {
+ $FAIL($Fail);
+ }
+
+ header = *boxed_val(context);
+
+ if (header_is_bin_matchstate(header)) {
+ ASSERT(HEADER_NUM_SLOTS(header) == 0);
+ $Dst = context;
+ } else if (is_binary_header(header)) {
+ ErlBinMatchState *ms;
+
+ $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(0), live, context);
+ HEAP_TOP(c_p) = HTOP;
+#ifdef DEBUG
+ c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
+#endif
+ ms = erts_bs_start_match_3(c_p, context);
+ HTOP = HEAP_TOP(c_p);
+ HEAP_SPACE_VERIFIED(0);
+
+ if (ms == NULL) {
+ $FAIL($Fail);
+ }
+
+ $REFRESH_GEN_DEST();
+ $Dst = make_matchstate(ms);
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+bs_set_position(Ctx, Pos) {
+ ErlBinMatchBuffer* mb;
+ Eterm context;
+
+ context = $Ctx;
+ ASSERT(header_is_bin_matchstate(*boxed_val(context)));
+
+ mb = ms_matchbuffer(context);
+ mb->offset = unsigned_val($Pos);
+}
+
+i_bs_get_position(Ctx, Dst) {
+ ErlBinMatchBuffer* mb;
+ Eterm context;
+
+ context = $Ctx;
+ ASSERT(header_is_bin_matchstate(*boxed_val(context)));
+
+ mb = ms_matchbuffer(context);
+ $Dst = make_small(mb->offset);
+}
+
+%else
+
+#
+# Unlike their 64-bit counterparts, the 32-bit position instructions operate on
+# an offset from the "base position" of the context because storing raw
+# positions would lead to the creation of far too many bigints.
+#
+# When a match context is reused we check whether its position fits into an
+# immediate, and create a new match context if it does not. This means we only
+# have to allocate a new context roughly once every 16MB rather than every
+# time we match at a position beyond 16MB.
+#
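+# As an illustration (hypothetical numbers): for a context whose base
+# position in save_offset[0] is 20,000,000 bits, bs_get_position returns
+# a position of 20,000,100 as the immediate 100; a bignum is only needed
+# once the relative offset itself outgrows the immediate range.
+#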
+
+bs_set_position := bs_set_position.fetch.execute;
+
+bs_set_position.head() {
+ Eterm context, position;
+}
+
+bs_set_position.fetch(Ctx, Pos) {
+ context = $Ctx;
+ position = $Pos;
+}
+
+bs_set_position.execute() {
+ ErlBinMatchState *ms;
+
+ ASSERT(header_is_bin_matchstate(*boxed_val(context)));
+ ms = (ErlBinMatchState*)boxed_val(context);
+
+ if (ERTS_LIKELY(is_small(position))) {
+ ms->mb.offset = ms->save_offset[0] + unsigned_val(position);
+ } else {
+ ASSERT(is_big(position));
+ ms->mb.offset = ms->save_offset[0] + *BIG_V(big_val(position));
+ }
+}
+
+bs_get_position := bs_get_position.fetch.execute;
+
+bs_get_position.head() {
+ Eterm context;
+}
+
+bs_get_position.fetch(Ctx) {
+ context = $Ctx;
+}
+
+bs_get_position.execute(Dst, Live) {
+ ErlBinMatchState *ms;
+ Uint position;
+
+ ASSERT(header_is_bin_matchstate(*boxed_val(context)));
+ ms = (ErlBinMatchState*)boxed_val(context);
+
+ position = ms->mb.offset - ms->save_offset[0];
+
+ if (ERTS_LIKELY(IS_USMALL(0, position))) {
+ $Dst = make_small(position);
+ } else {
+ Eterm *hp;
+
+ $GC_TEST_PRESERVE(BIG_UINT_HEAP_SIZE, $Live, context);
+
+ hp = HTOP;
+ HTOP += BIG_UINT_HEAP_SIZE;
+
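+        /* The position does not fit in an immediate; box it as a
+         * one-digit positive bignum. */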
+ *hp = make_pos_bignum_header(1);
+ BIG_DIGIT(hp, 0) = position;
+
+ $REFRESH_GEN_DEST();
+ $Dst = make_big(hp);
+ }
+}
+
+i_bs_start_match3 := i_bs_start_match3.fetch.execute;
+
+i_bs_start_match3.head() {
+ Eterm context;
+}
+
+i_bs_start_match3.fetch(Src) {
+ context = $Src;
+}
+
+i_bs_start_match3.execute(Live, Fail, Dst) {
+ Eterm header;
+ Uint live;
+
+ live = $Live;
+
+ if (!is_boxed(context)) {
+ $FAIL($Fail);
+ }
+
+ header = *boxed_val(context);
+
+ if (header_is_bin_matchstate(header)) {
+ ErlBinMatchState *current_ms;
+ Uint position;
+
+ ASSERT(HEADER_NUM_SLOTS(header) == 1);
+
+ current_ms = (ErlBinMatchState*)boxed_val(context);
+ position = current_ms->mb.offset - current_ms->save_offset[0];
+
+ if (ERTS_LIKELY(IS_USMALL(0, position))) {
+ $Dst = context;
+ } else {
+ ErlBinMatchState *new_ms;
+
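+            /* The relative position no longer fits in an immediate, so we
+             * create a fresh context rebased at the current offset. */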
+ $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(1), live, context);
+ current_ms = (ErlBinMatchState*)boxed_val(context);
+
+ new_ms = (ErlBinMatchState*)HTOP;
+ HTOP += ERL_BIN_MATCHSTATE_SIZE(1);
+
+ new_ms->thing_word = HEADER_BIN_MATCHSTATE(1);
+ new_ms->save_offset[0] = current_ms->mb.offset;
+ new_ms->mb = current_ms->mb;
+
+ $REFRESH_GEN_DEST();
+ $Dst = make_matchstate(new_ms);
+ }
+ } else if (is_binary_header(header)) {
+ Eterm result;
+
+ $GC_TEST_PRESERVE(ERL_BIN_MATCHSTATE_SIZE(1), live, context);
+ HEAP_TOP(c_p) = HTOP;
+
+#ifdef DEBUG
+ c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
+#endif
+
+ /* We intentionally use erts_bs_start_match_2 so that we can use
+ * save_offset as a base for all saved positions on this context,
+ * allowing us to avoid bigints for much longer. */
+ result = erts_bs_start_match_2(c_p, context, 1);
+
+ HTOP = HEAP_TOP(c_p);
+ HEAP_SPACE_VERIFIED(0);
+
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+%endif
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index e7bd046e18..db74b06cc5 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -854,7 +854,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case EXTERNAL_REF_SUBTAG:
{
ExternalThing *etp = (ExternalThing *) objp;
- erts_refc_inc(&etp->node->refc, 2);
+ erts_ref_node_entry(etp->node, 2, make_boxed(htop));
}
L_off_heap_node_container_common:
{
@@ -1660,7 +1660,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
case EXTERNAL_REF_SUBTAG:
{
ExternalThing *etp = (ExternalThing *) ptr;
- erts_refc_inc(&etp->node->refc, 2);
+ erts_ref_node_entry(etp->node, 2, make_boxed(hp));
}
off_heap_node_container_common:
{
@@ -1866,7 +1866,7 @@ Eterm copy_shallow(Eterm* ERTS_RESTRICT ptr, Uint sz, Eterm** hpp,
case EXTERNAL_REF_SUBTAG:
{
ExternalThing* etp = (ExternalThing *) (tp-1);
- erts_refc_inc(&etp->node->refc, 2);
+ erts_ref_node_entry(etp->node, 2, make_boxed(hp-1));
}
off_heap_common:
{
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index bbe2fc31f3..5e48a553af 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -50,19 +50,26 @@
#define DIST_CTL_DEFAULT_SIZE 64
/* Turn this on to get printouts of all distribution messages
- * which go on the line
+ * which go on the line. Enabling this may cause some test cases to
+ * fail, especially the broken dist test cases in distribution_SUITE.
*/
#if 0
#define ERTS_DIST_MSG_DBG
#endif
#if 0
+/* Enable this to print the dist debug messages to a file instead */
+#define ERTS_DIST_MSG_DBG_FILE "/tmp/dist_dbg.%d"
+#endif
+#if 0
+/* Enable this to print the raw bytes sent and received */
#define ERTS_RAW_DIST_MSG_DBG
#endif
#if defined(ERTS_DIST_MSG_DBG) || defined(ERTS_RAW_DIST_MSG_DBG)
+FILE *dbg_file;
static void bw(byte *buf, ErlDrvSizeT sz)
{
- bin_write(ERTS_PRINT_STDERR, NULL, buf, sz);
+ bin_write(ERTS_PRINT_FILE, dbg_file, buf, sz);
}
#endif
@@ -70,39 +77,93 @@ static void bw(byte *buf, ErlDrvSizeT sz)
static void
dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz)
{
- ErtsHeapFactory factory;
- DeclareTmpHeapNoproc(ctl_default,DIST_CTL_DEFAULT_SIZE);
- Eterm* ctl = ctl_default;
- byte *extp = edep->extp;
+ byte *extp = edep->data->extp;
Eterm msg;
Sint ctl_len;
- Sint size = ctl_len = erts_decode_dist_ext_size(edep);
+ Sint size = ctl_len = erts_decode_dist_ext_size(edep, 0);
if (size < 0) {
- erts_fprintf(stderr,
+ erts_fprintf(dbg_file,
"DIST MSG DEBUG: erts_decode_dist_ext_size(%s) failed:\n",
what);
bw(buf, sz);
}
else {
- ErlHeapFragment *mbuf = new_message_buffer(size);
- erts_factory_static_init(&factory, ctl, ctl_len, &mbuf->off_heap);
- msg = erts_decode_dist_ext(&factory, edep);
+ ErtsHeapFactory factory;
+ ErtsMessage *mbuf = erts_factory_message_create(&factory, NULL, 0, ctl_len);
+ /* Set mbuf msg to NIL as erts_factory_undo will fail otherwise */
+ ERL_MESSAGE_TERM(mbuf) = NIL;
+ msg = erts_decode_dist_ext(&factory, edep, 0);
if (is_value(msg))
- erts_fprintf(stderr, " %s: %T\n", what, msg);
+ erts_fprintf(dbg_file, " %s: %.80T\n", what, msg);
else {
- erts_fprintf(stderr,
+ erts_fprintf(dbg_file,
"DIST MSG DEBUG: erts_decode_dist_ext(%s) failed:\n",
what);
bw(buf, sz);
}
- free_message_buffer(mbuf);
- edep->extp = extp;
+ erts_factory_undo(&factory);
+ edep->data->extp = extp;
}
}
+static char *erts_dop_to_string(enum dop dop) {
+ if (dop == DOP_LINK)
+ return "LINK";
+ if (dop == DOP_SEND)
+ return "SEND";
+ if (dop == DOP_EXIT)
+ return "EXIT";
+ if (dop == DOP_UNLINK)
+ return "UNLINK";
+ if (dop == DOP_REG_SEND)
+ return "REG_SEND";
+ if (dop == DOP_GROUP_LEADER)
+ return "GROUP_LEADER";
+ if (dop == DOP_EXIT2)
+ return "EXIT2";
+ if (dop == DOP_SEND_TT)
+ return "SEND_TT";
+ if (dop == DOP_EXIT_TT)
+ return "EXIT_TT";
+ if (dop == DOP_REG_SEND_TT)
+ return "REG_SEND_TT";
+ if (dop == DOP_EXIT2_TT)
+ return "EXIT2_TT";
+ if (dop == DOP_MONITOR_P)
+ return "MONITOR_P";
+ if (dop == DOP_DEMONITOR_P)
+ return "DEMONITOR_P";
+ if (dop == DOP_MONITOR_P_EXIT)
+ return "MONITOR_P_EXIT";
+ if (dop == DOP_SEND_SENDER)
+ return "SEND_SENDER";
+ if (dop == DOP_SEND_SENDER_TT)
+ return "SEND_SENDER_TT";
+ if (dop == DOP_PAYLOAD_EXIT)
+ return "PAYLOAD_EXIT";
+ if (dop == DOP_PAYLOAD_EXIT_TT)
+ return "PAYLOAD_EXIT_TT";
+ if (dop == DOP_PAYLOAD_EXIT2)
+ return "PAYLOAD_EXIT2";
+ if (dop == DOP_PAYLOAD_EXIT2_TT)
+ return "PAYLOAD_EXIT2_TT";
+ if (dop == DOP_PAYLOAD_MONITOR_P_EXIT)
+ return "PAYLOAD_MONITOR_P_EXIT";
+ ASSERT(0);
+ return "UNKNOWN";
+}
+
#endif
+#if defined(VALGRIND)
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+# define PURIFY_MSG(msg) \
+ VALGRIND_PRINTF("%s, line %d: %s", __FILE__, __LINE__, msg)
+#else
+# define PURIFY_MSG(msg)
+#endif
int erts_is_alive; /* System must be blocked on change */
int erts_dist_buf_busy_limit;
@@ -116,12 +177,17 @@ static Export *dist_ctrl_put_data_trap;
/* forward declarations */
-static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy);
+static void erts_schedule_dist_command(Port *, DistEntry *);
+static int dsig_send_exit(ErtsDSigSendContext *ctx, Eterm ctl, Eterm msg);
+static int dsig_send_ctl(ErtsDSigSendContext *ctx, Eterm ctl);
static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
static void init_nodes_monitors(void);
static Sint abort_connection(DistEntry* dep, Uint32 conn_id);
static ErtsDistOutputBuf* clear_de_out_queues(DistEntry*);
static void free_de_out_queues(DistEntry*, ErtsDistOutputBuf*);
+int erts_dist_seq_tree_foreach_delete_yielding(DistSeqNode **root,
+ void **vyspp,
+ Sint limit);
static erts_atomic_t no_caches;
static erts_atomic_t no_nodes;
@@ -142,7 +208,6 @@ delete_cache(ErtsAtomCache *cache)
}
}
-
static void
create_cache(DistEntry *dep)
{
@@ -185,32 +250,36 @@ get_suspended_on_de(DistEntry *dep, erts_aint32_t unset_qflgs)
}
}
-#define ERTS_MON_LNK_FIRE_LIMIT 100
+#define ERTS_MON_LNK_FIRE_REDS 40
-static void monitor_connection_down(ErtsMonitor *mon, void *unused)
+static int monitor_connection_down(ErtsMonitor *mon, void *unused, Sint reds)
{
if (erts_monitor_is_origin(mon))
erts_proc_sig_send_demonitor(mon);
else
erts_proc_sig_send_monitor_down(mon, am_noconnection);
+ return ERTS_MON_LNK_FIRE_REDS;
}
-static void link_connection_down(ErtsLink *lnk, void *vdist)
+static int link_connection_down(ErtsLink *lnk, void *vdist, Sint reds)
{
erts_proc_sig_send_link_exit(NULL, THE_NON_VALUE, lnk,
am_noconnection, NIL);
+ return ERTS_MON_LNK_FIRE_REDS;
}
typedef enum {
ERTS_CML_CLEANUP_STATE_LINKS,
ERTS_CML_CLEANUP_STATE_MONITORS,
ERTS_CML_CLEANUP_STATE_ONAME_MONITORS,
+ ERTS_CML_CLEANUP_STATE_SEQUENCES,
ERTS_CML_CLEANUP_STATE_NODE_MONITORS
-} ErtsConMonLnkCleaupState;
+} ErtsConMonLnkSeqCleanupState;
typedef struct {
- ErtsConMonLnkCleaupState state;
+ ErtsConMonLnkSeqCleanupState state;
ErtsMonLnkDist *dist;
+ DistSeqNode *seq;
void *yield_state;
int trigger_node_monitors;
Eterm nodename;
@@ -218,49 +287,58 @@ typedef struct {
Eterm reason;
ErlOffHeap oh;
Eterm heap[1];
-} ErtsConMonLnkCleanup;
+} ErtsConMonLnkSeqCleanup;
static void
-con_monitor_link_cleanup(void *vcmlcp)
+con_monitor_link_seq_cleanup(void *vcmlcp)
{
- ErtsConMonLnkCleanup *cmlcp = vcmlcp;
+ ErtsConMonLnkSeqCleanup *cmlcp = vcmlcp;
ErtsMonLnkDist *dist = cmlcp->dist;
ErtsSchedulerData *esdp;
- int yield;
+ int reds = CONTEXT_REDS;
switch (cmlcp->state) {
case ERTS_CML_CLEANUP_STATE_LINKS:
- yield = erts_link_list_foreach_delete_yielding(&dist->links,
- link_connection_down,
- NULL, &cmlcp->yield_state,
- ERTS_MON_LNK_FIRE_LIMIT);
- if (yield)
+ reds = erts_link_list_foreach_delete_yielding(&dist->links,
+ link_connection_down,
+ NULL, &cmlcp->yield_state,
+ reds);
+ if (reds <= 0)
break;
ASSERT(!cmlcp->yield_state);
cmlcp->state = ERTS_CML_CLEANUP_STATE_MONITORS;
case ERTS_CML_CLEANUP_STATE_MONITORS:
- yield = erts_monitor_list_foreach_delete_yielding(&dist->monitors,
- monitor_connection_down,
- NULL, &cmlcp->yield_state,
- ERTS_MON_LNK_FIRE_LIMIT);
- if (yield)
+ reds = erts_monitor_list_foreach_delete_yielding(&dist->monitors,
+ monitor_connection_down,
+ NULL, &cmlcp->yield_state,
+ reds);
+ if (reds <= 0)
break;
ASSERT(!cmlcp->yield_state);
cmlcp->state = ERTS_CML_CLEANUP_STATE_ONAME_MONITORS;
case ERTS_CML_CLEANUP_STATE_ONAME_MONITORS:
- yield = erts_monitor_tree_foreach_delete_yielding(&dist->orig_name_monitors,
- monitor_connection_down,
- NULL, &cmlcp->yield_state,
- ERTS_MON_LNK_FIRE_LIMIT/2);
- if (yield)
+ reds = erts_monitor_tree_foreach_delete_yielding(&dist->orig_name_monitors,
+ monitor_connection_down,
+ NULL, &cmlcp->yield_state,
+ reds);
+ if (reds <= 0)
break;
cmlcp->dist = NULL;
erts_mon_link_dist_dec_refc(dist);
ASSERT(!cmlcp->yield_state);
+ cmlcp->state = ERTS_CML_CLEANUP_STATE_SEQUENCES;
+ case ERTS_CML_CLEANUP_STATE_SEQUENCES:
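+        /* Also free any partially received fragmented signals (sequences)
+         * that were still pending on the connection. */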
+ reds = erts_dist_seq_tree_foreach_delete_yielding(&cmlcp->seq,
+ &cmlcp->yield_state,
+ reds);
+ if (reds <= 0)
+ break;
+
+ ASSERT(!cmlcp->yield_state);
cmlcp->state = ERTS_CML_CLEANUP_STATE_NODE_MONITORS;
case ERTS_CML_CLEANUP_STATE_NODE_MONITORS:
if (cmlcp->trigger_node_monitors) {
@@ -280,22 +358,23 @@ con_monitor_link_cleanup(void *vcmlcp)
esdp = erts_get_scheduler_data();
ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
erts_schedule_misc_aux_work((int) esdp->no,
- con_monitor_link_cleanup,
+ con_monitor_link_seq_cleanup,
(void *) cmlcp);
}
static void
-schedule_con_monitor_link_cleanup(ErtsMonLnkDist *dist,
- Eterm nodename,
- Eterm visability,
- Eterm reason)
+schedule_con_monitor_link_seq_cleanup(ErtsMonLnkDist *dist,
+ DistSeqNode *seq,
+ Eterm nodename,
+ Eterm visability,
+ Eterm reason)
{
- if (dist || is_value(nodename)) {
+ if (dist || is_value(nodename) || seq) {
ErtsSchedulerData *esdp;
- ErtsConMonLnkCleanup *cmlcp;
+ ErtsConMonLnkSeqCleanup *cmlcp;
Uint rsz, size;
- size = sizeof(ErtsConMonLnkCleanup);
+ size = sizeof(ErtsConMonLnkSeqCleanup);
if (is_non_value(reason) || is_immed(reason)) {
rsz = 0;
@@ -322,6 +401,8 @@ schedule_con_monitor_link_cleanup(ErtsMonLnkDist *dist,
erts_mtx_unlock(&dist->mtx);
}
+ cmlcp->seq = seq;
+
cmlcp->trigger_node_monitors = is_value(nodename);
cmlcp->nodename = nodename;
cmlcp->visability = visability;
@@ -335,13 +416,13 @@ schedule_con_monitor_link_cleanup(ErtsMonLnkDist *dist,
esdp = erts_get_scheduler_data();
ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
erts_schedule_misc_aux_work((int) esdp->no,
- con_monitor_link_cleanup,
+ con_monitor_link_seq_cleanup,
(void *) cmlcp);
}
}
/*
-** A full node name constists of a "n@h"
+** A full node name consists of a "n@h"
**
** n must be a valid node name: string of ([a-z][A-Z][0-9]_-)+
**
@@ -558,6 +639,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
}
else { /* Call from distribution controller (port/process) */
ErtsMonLnkDist *mld;
+ DistSeqNode *sequences;
ErtsAtomCache *cache;
ErtsProcList *suspendees;
ErtsDistOutputBuf *obuf;
@@ -574,9 +656,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
}
if (dep->state == ERTS_DE_STATE_EXITING) {
-#ifdef DEBUG
ASSERT(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT);
-#endif
}
else {
dep->state = ERTS_DE_STATE_EXITING;
@@ -589,6 +669,9 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
mld = dep->mld;
dep->mld = NULL;
+ sequences = dep->sequences;
+ dep->sequences = NULL;
+
nodename = dep->sysname;
flags = dep->flags;
@@ -612,14 +695,15 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
erts_de_rwunlock(dep);
- schedule_con_monitor_link_cleanup(mld,
- nodename,
- (flags & DFLAG_PUBLISHED
- ? am_visible
- : am_hidden),
- (reason == am_normal
- ? am_connection_closed
- : reason));
+ schedule_con_monitor_link_seq_cleanup(mld,
+ sequences,
+ nodename,
+ (flags & DFLAG_PUBLISHED
+ ? am_visible
+ : am_hidden),
+ (reason == am_normal
+ ? am_connection_closed
+ : reason));
erts_resume_processes(suspendees);
@@ -653,6 +737,16 @@ void init_dist(void)
{
init_nodes_monitors();
+#ifdef ERTS_DIST_MSG_DBG_FILE
+ {
+ char buff[255];
+ sprintf(buff, ERTS_DIST_MSG_DBG_FILE, getpid());
+ dbg_file = fopen(buff,"w+");
+ }
+#elif defined(ERTS_DIST_MSG_DBG) || defined(ERTS_RAW_DIST_MSG_DBG)
+ dbg_file = stderr;
+#endif
+
nodedown.reason = NIL;
nodedown.bp = NULL;
@@ -676,21 +770,33 @@ void init_dist(void)
}
}
-#define ErtsDistOutputBuf2Binary(OB) \
- ((Binary *) (((char *) (OB)) - offsetof(Binary, orig_bytes)))
+#define ErtsDistOutputBuf2Binary(OB) OB->bin
static ERTS_INLINE ErtsDistOutputBuf *
-alloc_dist_obuf(Uint size)
+alloc_dist_obuf(Uint size, Uint headers)
{
+ Uint obuf_size = sizeof(ErtsDistOutputBuf)*(headers);
ErtsDistOutputBuf *obuf;
- Uint obuf_size = sizeof(ErtsDistOutputBuf)+sizeof(byte)*(size-1);
- Binary *bin = erts_bin_drv_alloc(obuf_size);
- obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
+ Binary *bin;
+ byte *extp;
+ int i;
+
+ bin = erts_bin_drv_alloc(obuf_size + size);
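+    /* All fragment headers are placed in a single refcounted Binary; add
+     * one reference per extra header so that each header can be released
+     * independently via free_dist_obuf(). */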
+ erts_refc_add(&bin->intern.refc, headers - 1, 1);
+
+ obuf = (ErtsDistOutputBuf *)&bin->orig_bytes[0];
+ extp = (byte *)&bin->orig_bytes[obuf_size];
+
+ for (i = 0; i < headers; i++) {
+ obuf[i].bin = bin;
+ obuf[i].extp = extp;
#ifdef DEBUG
- obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
- obuf->alloc_endp = obuf->data + size;
- ASSERT(bin == ErtsDistOutputBuf2Binary(obuf));
+ obuf[i].dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
+ obuf[i].ext_startp = extp;
+ obuf[i].alloc_endp = &extp[size];
+ ASSERT(bin == ErtsDistOutputBuf2Binary(obuf));
#endif
+ }
return obuf;
}
@@ -705,8 +811,8 @@ free_dist_obuf(ErtsDistOutputBuf *obuf)
static ERTS_INLINE Sint
size_obuf(ErtsDistOutputBuf *obuf)
{
- Binary *bin = ErtsDistOutputBuf2Binary(obuf);
- return bin->orig_size;
+ return sizeof(ErtsDistOutputBuf) + (obuf->ext_endp - obuf->ext_start)
+ + (obuf->hdr_endp - obuf->hdrp);
}
static ErtsDistOutputBuf* clear_de_out_queues(DistEntry* dep)
@@ -760,18 +866,20 @@ static void free_de_out_queues(DistEntry* dep, ErtsDistOutputBuf *obuf)
int erts_dsend_context_dtor(Binary* ctx_bin)
{
- ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
- switch (ctx->dss.phase) {
+ ErtsDSigSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ switch (ctx->phase) {
case ERTS_DSIG_SEND_PHASE_MSG_SIZE:
- DESTROY_SAVED_WSTACK(&ctx->dss.u.sc.wstack);
+ DESTROY_SAVED_WSTACK(&ctx->u.sc.wstack);
break;
case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
- DESTROY_SAVED_WSTACK(&ctx->dss.u.ec.wstack);
+ DESTROY_SAVED_WSTACK(&ctx->u.ec.wstack);
break;
default:;
}
- if (ctx->dss.phase >= ERTS_DSIG_SEND_PHASE_ALLOC && ctx->dss.obuf) {
- free_dist_obuf(ctx->dss.obuf);
+ if (ctx->phase >= ERTS_DSIG_SEND_PHASE_ALLOC && ctx->obuf) {
+ int i;
+ for (i = 0; i < ctx->fragments; i++)
+ free_dist_obuf(&ctx->obuf[i]);
}
if (ctx->deref_dep)
erts_deref_dist_entry(ctx->dep);
@@ -779,10 +887,10 @@ int erts_dsend_context_dtor(Binary* ctx_bin)
return 1;
}
-Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
+Eterm erts_dsend_export_trap_context(Process* p, ErtsDSigSendContext* ctx)
{
struct exported_ctx {
- ErtsSendContext ctx;
+ ErtsDSigSendContext ctx;
ErtsAtomCacheMap acm;
};
Binary* ctx_bin = erts_create_magic_binary(sizeof(struct exported_ctx),
@@ -790,12 +898,12 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
struct exported_ctx* dst = ERTS_MAGIC_BIN_DATA(ctx_bin);
Eterm* hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
- sys_memcpy(&dst->ctx, ctx, sizeof(ErtsSendContext));
- ASSERT(ctx->dss.ctl == make_tuple(ctx->ctl_heap));
- dst->ctx.dss.ctl = make_tuple(dst->ctx.ctl_heap);
- if (ctx->dss.acmp) {
- sys_memcpy(&dst->acm, ctx->dss.acmp, sizeof(ErtsAtomCacheMap));
- dst->ctx.dss.acmp = &dst->acm;
+ sys_memcpy(&dst->ctx, ctx, sizeof(ErtsDSigSendContext));
+ ASSERT(ctx->ctl == make_tuple(ctx->ctl_heap));
+ dst->ctx.ctl = make_tuple(dst->ctx.ctl_heap);
+ if (ctx->acmp) {
+ sys_memcpy(&dst->acm, ctx->acmp, sizeof(ErtsAtomCacheMap));
+ dst->ctx.acmp = &dst->acm;
}
return erts_mk_magic_ref(&hp, &MSO(p), ctx_bin);
}
@@ -816,71 +924,58 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
** Send a DOP_LINK link message
*/
int
-erts_dsig_send_link(ErtsDSigData *dsdp, Eterm local, Eterm remote)
+erts_dsig_send_link(ErtsDSigSendContext *ctx, Eterm local, Eterm remote)
{
- DeclareTmpHeapNoproc(ctl_heap,4);
- Eterm ctl = TUPLE3(&ctl_heap[0], make_small(DOP_LINK), local, remote);
- int res;
- UseTmpHeapNoproc(4);
-
- res = dsig_send_ctl(dsdp, ctl, 0);
- UnUseTmpHeapNoproc(4);
- return res;
+ Eterm ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_LINK), local, remote);
+ return dsig_send_ctl(ctx, ctl);
}
int
-erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
+erts_dsig_send_unlink(ErtsDSigSendContext *ctx, Eterm local, Eterm remote)
{
- DeclareTmpHeapNoproc(ctl_heap,4);
- Eterm ctl = TUPLE3(&ctl_heap[0], make_small(DOP_UNLINK), local, remote);
- int res;
-
- UseTmpHeapNoproc(4);
- res = dsig_send_ctl(dsdp, ctl, 0);
- UnUseTmpHeapNoproc(4);
- return res;
+ Eterm ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_UNLINK), local, remote);
+ return dsig_send_ctl(ctx, ctl);
}
/* A local process that's being monitored by a remote one exits. We send:
{DOP_MONITOR_P_EXIT, Local pid or name, Remote pid, ref, reason} */
int
-erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
- Eterm ref, Eterm reason)
+erts_dsig_send_m_exit(ErtsDSigSendContext *ctx, Eterm watcher, Eterm watched,
+ Eterm ref, Eterm reason)
{
- Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,6);
- int res;
+ Eterm ctl, msg;
- if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ if (~ctx->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
/*
* Receiver does not support DOP_MONITOR_P_EXIT (see dsig_send_monitor)
*/
return ERTS_DSIG_SEND_OK;
}
- UseTmpHeapNoproc(6);
-
- ctl = TUPLE5(&ctl_heap[0], make_small(DOP_MONITOR_P_EXIT),
- watched, watcher, ref, reason);
+ if (ctx->dep->flags & DFLAG_EXIT_PAYLOAD) {
+ ctl = TUPLE4(&ctx->ctl_heap[0], make_small(DOP_PAYLOAD_MONITOR_P_EXIT),
+ watched, watcher, ref);
+ msg = reason;
+ } else {
+ ctl = TUPLE5(&ctx->ctl_heap[0], make_small(DOP_MONITOR_P_EXIT),
+ watched, watcher, ref, reason);
+ msg = THE_NON_VALUE;
+ }
- res = dsig_send_ctl(dsdp, ctl, 1);
- UnUseTmpHeapNoproc(6);
- return res;
+ return dsig_send_exit(ctx, ctl, msg);
}
/* We want to monitor a process (named or unnamed) on another node, we send:
{DOP_MONITOR_P, Local pid, Remote pid or name, Ref}, which is exactly what's
needed on the other side... */
int
-erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
+erts_dsig_send_monitor(ErtsDSigSendContext *ctx, Eterm watcher, Eterm watched,
Eterm ref)
{
Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,5);
- int res;
- if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ if (~ctx->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
/*
* Receiver does not support DOP_MONITOR_P.
* Just avoid sending it and by doing that reduce this monitor
@@ -890,48 +985,40 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
return ERTS_DSIG_SEND_OK;
}
- UseTmpHeapNoproc(5);
- ctl = TUPLE4(&ctl_heap[0],
+ ctl = TUPLE4(&ctx->ctl_heap[0],
make_small(DOP_MONITOR_P),
watcher, watched, ref);
- res = dsig_send_ctl(dsdp, ctl, 0);
- UnUseTmpHeapNoproc(5);
- return res;
+ return dsig_send_ctl(ctx, ctl);
}
/* A local process monitoring a remote one wants to stop monitoring, either
because of a demonitor bif call or because the local process died. We send
{DOP_DEMONITOR_P, Local pid, Remote pid or name, ref} */
int
-erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
- Eterm watched, Eterm ref, int force)
+erts_dsig_send_demonitor(ErtsDSigSendContext *ctx, Eterm watcher,
+ Eterm watched, Eterm ref)
{
Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,5);
- int res;
- if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ if (~ctx->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
/*
* Receiver does not support DOP_DEMONITOR_P (see dsig_send_monitor)
*/
return ERTS_DSIG_SEND_OK;
}
- UseTmpHeapNoproc(5);
- ctl = TUPLE4(&ctl_heap[0],
+ ctl = TUPLE4(&ctx->ctl_heap[0],
make_small(DOP_DEMONITOR_P),
watcher, watched, ref);
- res = dsig_send_ctl(dsdp, ctl, force);
- UnUseTmpHeapNoproc(5);
- return res;
+ return dsig_send_ctl(ctx, ctl);
}
-static int can_send_seqtrace_token(ErtsSendContext* ctx, Eterm token) {
+static int can_send_seqtrace_token(ErtsDSigSendContext* ctx, Eterm token) {
Eterm label;
- if (ctx->dsd.flags & DFLAG_BIG_SEQTRACE_LABELS) {
+ if (ctx->flags & DFLAG_BIG_SEQTRACE_LABELS) {
/* The other end is capable of handling arbitrary seq_trace labels. */
return 1;
}
@@ -947,11 +1034,11 @@ static int can_send_seqtrace_token(ErtsSendContext* ctx, Eterm token) {
}
int
-erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
+erts_dsig_send_msg(ErtsDSigSendContext* ctx, Eterm remote, Eterm message)
{
Eterm ctl;
Eterm token = NIL;
- Process *sender = ctx->dsd.proc;
+ Process *sender = ctx->c_p;
int res;
#ifdef USE_VM_PROBES
Sint tok_label = 0;
@@ -972,7 +1059,7 @@ erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
- "%T", ctx->dsd.dep->sysname);
+ "%T", ctx->dep->sysname);
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
@@ -992,7 +1079,7 @@ erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
send_token = (token != NIL && can_send_seqtrace_token(ctx, token));
- if (ctx->dsd.flags & DFLAG_SEND_SENDER) {
+ if (ctx->flags & DFLAG_SEND_SENDER) {
dist_op = make_small(send_token ?
DOP_SEND_SENDER_TT :
DOP_SEND_SENDER);
@@ -1015,21 +1102,18 @@ erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- ctx->dss.ctl = ctl;
- ctx->dss.msg = message;
- ctx->dss.force_busy = 0;
- res = erts_dsig_send(&ctx->dsd, &ctx->dss);
+ ctx->ctl = ctl;
+ ctx->msg = message;
+ res = erts_dsig_send(ctx);
return res;
}
int
-erts_dsig_send_reg_msg(Eterm remote_name, Eterm message,
- ErtsSendContext* ctx)
+erts_dsig_send_reg_msg(ErtsDSigSendContext* ctx, Eterm remote_name, Eterm message)
{
Eterm ctl;
Eterm token = NIL;
- Process *sender = ctx->dsd.proc;
- int res;
+ Process *sender = ctx->c_p;
#ifdef USE_VM_PROBES
Sint tok_label = 0;
Sint tok_lastcnt = 0;
@@ -1049,7 +1133,7 @@ erts_dsig_send_reg_msg(Eterm remote_name, Eterm message,
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
- "%T", ctx->dsd.dep->sysname);
+ "%T", ctx->dep->sysname);
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
@@ -1074,23 +1158,19 @@ erts_dsig_send_reg_msg(Eterm remote_name, Eterm message,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- ctx->dss.ctl = ctl;
- ctx->dss.msg = message;
- ctx->dss.force_busy = 0;
- res = erts_dsig_send(&ctx->dsd, &ctx->dss);
- return res;
+ ctx->ctl = ctl;
+ ctx->msg = message;
+ return erts_dsig_send(ctx);
}
/* local has died, deliver the exit signal to remote */
int
-erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
+erts_dsig_send_exit_tt(ErtsDSigSendContext *ctx, Eterm local, Eterm remote,
Eterm reason, Eterm token)
{
- Eterm ctl;
- DeclareTmpHeapNoproc(ctl_heap,6);
- int res;
+ Eterm ctl, msg = THE_NON_VALUE;
#ifdef USE_VM_PROBES
- Process *sender = dsdp->proc;
+ Process *sender = ctx->c_p;
Sint tok_label = 0;
Sint tok_lastcnt = 0;
Sint tok_serial = 0;
@@ -1100,20 +1180,29 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
DTRACE_CHARBUF(reason_str, 128);
#endif
- UseTmpHeapNoproc(6);
+ if (ctx->dep->flags & DFLAG_EXIT_PAYLOAD)
+ msg = reason;
+
if (have_seqtrace(token)) {
- seq_trace_update_send(dsdp->proc);
+ seq_trace_update_send(ctx->c_p);
seq_trace_output_exit(token, reason, SEQ_TRACE_SEND, remote, local);
- ctl = TUPLE5(&ctl_heap[0],
- make_small(DOP_EXIT_TT), local, remote, token, reason);
+ if (ctx->dep->flags & DFLAG_EXIT_PAYLOAD) {
+ ctl = TUPLE4(&ctx->ctl_heap[0],
+ make_small(DOP_PAYLOAD_EXIT_TT), local, remote, token);
+ } else
+ ctl = TUPLE5(&ctx->ctl_heap[0],
+ make_small(DOP_EXIT_TT), local, remote, token, reason);
} else {
- ctl = TUPLE4(&ctl_heap[0], make_small(DOP_EXIT), local, remote, reason);
+ if (ctx->dep->flags & DFLAG_EXIT_PAYLOAD)
+ ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_PAYLOAD_EXIT), local, remote);
+ else
+ ctl = TUPLE4(&ctx->ctl_heap[0], make_small(DOP_EXIT), local, remote, reason);
}
#ifdef USE_VM_PROBES
*node_name = *sender_name = *remote_name = '\0';
if (DTRACE_ENABLED(process_exit_signal_remote)) {
erts_snprintf(node_name, sizeof(DTRACE_CHARBUF_NAME(node_name)),
- "%T", dsdp->dep->sysname);
+ "%T", ctx->dep->sysname);
erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
"%T", sender->common.id);
erts_snprintf(remote_name, sizeof(DTRACE_CHARBUF_NAME(remote_name)),
@@ -1129,73 +1218,171 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
#endif
DTRACE7(process_exit_signal_remote, sender_name, node_name,
remote_name, reason_str, tok_label, tok_lastcnt, tok_serial);
- /* forced, i.e ignore busy */
- res = dsig_send_ctl(dsdp, ctl, 1);
- UnUseTmpHeapNoproc(6);
- return res;
+ return dsig_send_exit(ctx, ctl, msg);
}
int
-erts_dsig_send_exit(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
+erts_dsig_send_exit(ErtsDSigSendContext *ctx, Eterm local, Eterm remote, Eterm reason)
{
- DeclareTmpHeapNoproc(ctl_heap,5);
- int res;
- Eterm ctl;
+    Eterm ctl, msg;
- UseTmpHeapNoproc(5);
- ctl = TUPLE4(&ctl_heap[0],
- make_small(DOP_EXIT), local, remote, reason);
- /* forced, i.e ignore busy */
- res = dsig_send_ctl(dsdp, ctl, 1);
- UnUseTmpHeapNoproc(5);
- return res;
+ if (ctx->dep->flags & DFLAG_EXIT_PAYLOAD) {
+ ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_PAYLOAD_EXIT), local, remote);
+ msg = reason;
+ } else {
+ ctl = TUPLE4(&ctx->ctl_heap[0], make_small(DOP_EXIT), local, remote, reason);
+ msg = THE_NON_VALUE;
+ }
+ return dsig_send_exit(ctx, ctl, msg);
}
int
-erts_dsig_send_exit2(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
+erts_dsig_send_exit2(ErtsDSigSendContext *ctx, Eterm local, Eterm remote, Eterm reason)
{
- DeclareTmpHeapNoproc(ctl_heap,5);
- int res;
- Eterm ctl;
+ Eterm ctl, msg;
- UseTmpHeapNoproc(5);
- ctl = TUPLE4(&ctl_heap[0],
- make_small(DOP_EXIT2), local, remote, reason);
+ if (ctx->dep->flags & DFLAG_EXIT_PAYLOAD) {
+ ctl = TUPLE3(&ctx->ctl_heap[0],
+ make_small(DOP_PAYLOAD_EXIT2), local, remote);
+ msg = reason;
+ } else {
+ ctl = TUPLE4(&ctx->ctl_heap[0],
+ make_small(DOP_EXIT2), local, remote, reason);
+ msg = THE_NON_VALUE;
+ }
- res = dsig_send_ctl(dsdp, ctl, 0);
- UnUseTmpHeapNoproc(5);
- return res;
+ return dsig_send_exit(ctx, ctl, msg);
}
int
-erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
+erts_dsig_send_group_leader(ErtsDSigSendContext *ctx, Eterm leader, Eterm remote)
{
- DeclareTmpHeapNoproc(ctl_heap,4);
- int res;
Eterm ctl;
- UseTmpHeapNoproc(4);
- ctl = TUPLE3(&ctl_heap[0],
+ ctl = TUPLE3(&ctx->ctl_heap[0],
make_small(DOP_GROUP_LEADER), leader, remote);
- res = dsig_send_ctl(dsdp, ctl, 0);
- UnUseTmpHeapNoproc(4);
- return res;
+ return dsig_send_ctl(ctx, ctl);
}
-#if defined(PURIFY)
-# define PURIFY_MSG(msg) \
- purify_printf("%s, line %d: %s", __FILE__, __LINE__, msg)
-#elif defined(VALGRIND)
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
+struct dist_sequences {
+ ErlHeapFragment hfrag;
+ struct dist_sequences *parent;
+ struct dist_sequences *left;
+ struct dist_sequences *right;
+ char is_red;
-# define PURIFY_MSG(msg) \
- VALGRIND_PRINTF("%s, line %d: %s", __FILE__, __LINE__, msg)
-#else
-# define PURIFY_MSG(msg)
-#endif
+ Uint64 seq_id;
+ int cnt;
+ Sint ctl_len;
+};
+
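+/* Instantiate the generic red-black tree template from erl_rbtree.h,
+ * keyed on seq_id, to track partially received fragmented sequences. */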
+#define ERTS_RBT_PREFIX dist_seq
+#define ERTS_RBT_T DistSeqNode
+#define ERTS_RBT_KEY_T Uint
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->parent = NULL; \
+ (T)->left = NULL; \
+ (T)->right = NULL; \
+ (T)->is_red = 0; \
+ } while(0)
+#define ERTS_RBT_IS_RED(T) ((T)->is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
+#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
+#define ERTS_RBT_GET_PARENT(T) ((T)->parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->seq_id)
+#define ERTS_RBT_IS_LT(KX, KY) (KX < KY)
+#define ERTS_RBT_IS_EQ(KX, KY) (KX == KY)
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+
+#include "erl_rbtree.h"
+
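+/* The instantiation above generates dist_seq_rbt_delete, dist_seq_rbt_lookup,
+ * dist_seq_rbt_lookup_insert, dist_seq_rbt_foreach and
+ * dist_seq_rbt_foreach_destroy_yielding, which are used below. */
+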
+struct erts_dist_seq_tree_foreach_iter_arg {
+ int (*func)(ErtsDistExternal *, void *, Sint);
+ void *arg;
+};
+
+static int
+erts_dist_seq_tree_foreach_iter(DistSeqNode *seq, void *arg, Sint reds)
+{
+ struct erts_dist_seq_tree_foreach_iter_arg *state = arg;
+ return state->func(erts_get_dist_ext(&seq->hfrag), state->arg, reds);
+}
+
+void
+erts_dist_seq_tree_foreach(DistEntry *dep, int (*func)(ErtsDistExternal *, void *, Sint), void *arg)
+{
+ struct erts_dist_seq_tree_foreach_iter_arg state;
+ state.func = func;
+ state.arg = arg;
+ dist_seq_rbt_foreach(dep->sequences, erts_dist_seq_tree_foreach_iter, &state);
+}
+
+static int dist_seq_cleanup(DistSeqNode *seq, void *unused, Sint reds)
+{
+ erts_free_dist_ext_copy(erts_get_dist_ext(&seq->hfrag));
+ free_message_buffer(&seq->hfrag);
+ return 1;
+}
+
+typedef struct {
+ DistSeqNode *root;
+ dist_seq_rbt_yield_state_t rbt_ystate;
+} DistSeqNodeYieldState;
+
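+/* Deletes all sequence nodes under a reduction limit. A positive return
+ * value means the traversal completed; otherwise its state is saved in
+ * *vyspp and the call must be repeated. */
+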
+int
+erts_dist_seq_tree_foreach_delete_yielding(DistSeqNode **root,
+ void **vyspp,
+ Sint limit)
+{
+ DistSeqNodeYieldState ys = {*root, ERTS_RBT_YIELD_STAT_INITER};
+ DistSeqNodeYieldState *ysp;
+ int res;
+
+ ysp = (DistSeqNodeYieldState *) *vyspp;
+ if (!ysp) {
+ *root = NULL;
+ ysp = &ys;
+ }
+ res = dist_seq_rbt_foreach_destroy_yielding(&ysp->root,
+ dist_seq_cleanup,
+ NULL,
+ &ysp->rbt_ystate,
+ limit);
+ if (res > 0) {
+ if (ysp != &ys)
+ erts_free(ERTS_ALC_T_SEQ_YIELD_STATE, ysp);
+ *vyspp = NULL;
+ }
+ else {
+
+ if (ysp == &ys) {
+ ysp = erts_alloc(ERTS_ALC_T_SEQ_YIELD_STATE,
+ sizeof(DistSeqNodeYieldState));
+ sys_memcpy((void *) ysp, (void *) &ys,
+ sizeof(DistSeqNodeYieldState));
+ }
+
+ *vyspp = (void *) ysp;
+ }
+
+ return res;
+}
/*
** Input from distribution port.
@@ -1212,10 +1399,13 @@ int erts_net_message(Port *prt,
Uint32 conn_id,
byte *hbuf,
ErlDrvSizeT hlen,
+ Binary *bin,
byte *buf,
ErlDrvSizeT len)
{
- ErtsDistExternal ede;
+ ErtsDistExternal ede, *edep = &ede;
+ ErtsDistExternalData ede_data;
+ ErlHeapFragment *ede_hfrag = NULL;
Sint ctl_len;
Eterm arg;
Eterm from, to;
@@ -1227,10 +1417,8 @@ int erts_net_message(Port *prt,
DeclareTmpHeapNoproc(ctl_default,DIST_CTL_DEFAULT_SIZE);
Eterm* ctl = ctl_default;
ErtsHeapFactory factory;
- Eterm* hp;
Sint type;
Eterm token;
- Eterm token_size;
Uint tuple_arity;
int res;
#ifdef ERTS_DIST_MSG_DBG
@@ -1256,69 +1444,182 @@ int erts_net_message(Port *prt,
}
#ifdef ERTS_RAW_DIST_MSG_DBG
- erts_fprintf(stderr, "<< ");
+ erts_fprintf(dbg_file, "RECV: ");
bw(buf, len);
#endif
- res = erts_prepare_dist_ext(&ede, buf, len, dep, conn_id, dep->cache);
+ ede.data = &ede_data;
+
+ res = erts_prepare_dist_ext(&ede, buf, len, bin, dep, conn_id, dep->cache);
switch (res) {
case ERTS_PREP_DIST_EXT_CLOSED:
return 0; /* Connection not alive; ignore signal... */
case ERTS_PREP_DIST_EXT_FAILED:
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n");
+ erts_fprintf(dbg_file, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n");
bw(buf, orig_len);
#endif
goto data_error;
case ERTS_PREP_DIST_EXT_SUCCESS:
- ctl_len = erts_decode_dist_ext_size(&ede);
+ ctl_len = erts_decode_dist_ext_size(&ede, 1);
if (ctl_len < 0) {
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
+ erts_fprintf(dbg_file, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
bw(buf, orig_len);
#endif
PURIFY_MSG("data error");
goto data_error;
}
+
+ /* A non-fragmented message */
+ if (!ede.data->seq_id) {
+ if (ctl_len > DIST_CTL_DEFAULT_SIZE) {
+ ctl = erts_alloc(ERTS_ALC_T_DCTRL_BUF, ctl_len * sizeof(Eterm));
+ }
+
+ erts_factory_tmp_init(&factory, ctl, ctl_len, ERTS_ALC_T_DCTRL_BUF);
+ break;
+ } else {
+ DistSeqNode *seq;
+ Uint sz = erts_dist_ext_size(&ede);
+ Uint used_sz = ctl_len * sizeof(Eterm);
+
+        /* Calculate the size of the heap fragment to be allocated. The
+           used_size part has to be large enough to hold both the ctl data
+           and the DistSeqNode. */
+ if (used_sz + (sizeof(ErlHeapFragment) - sizeof(Eterm)) < sizeof(DistSeqNode))
+ used_sz = sizeof(DistSeqNode) - (sizeof(ErlHeapFragment) - sizeof(Eterm));
+
+ seq = (DistSeqNode *)new_message_buffer((sz + used_sz) / sizeof(Eterm));
+ seq->hfrag.used_size = used_sz / sizeof(Eterm);
+
+ seq->ctl_len = ctl_len;
+ seq->seq_id = ede.data->seq_id;
+ seq->cnt = ede.data->frag_id;
+ if (dist_seq_rbt_lookup_insert(&dep->sequences, seq) != NULL) {
+ free_message_buffer(&seq->hfrag);
+ goto data_error;
+ }
+
+ erts_make_dist_ext_copy(&ede, erts_get_dist_ext(&seq->hfrag));
+
+ if (ede.data->frag_id > 1) {
+ seq->cnt--;
+ return 0;
+ }
+ }
+
+ /* fall through, the first fragment in the sequence was the last fragment */
+ case ERTS_PREP_DIST_EXT_FRAG_CONT: {
+ DistSeqNode *seq = dist_seq_rbt_lookup(dep->sequences, ede.data->seq_id);
+
+ if (!seq)
+ goto data_error;
+
+        /* If we fell through from above, this has already been done */
+ if (res == ERTS_PREP_DIST_EXT_FRAG_CONT)
+ erts_dist_ext_frag(&ede_data, erts_get_dist_ext(&seq->hfrag));
+
+ /* Verify that the fragments have arrived in the correct order */
+ if (seq->cnt != ede.data->frag_id)
+ goto data_error;
+
+ seq->cnt--;
+
+ /* Check if this was the last fragment */
+ if (ede.data->frag_id > 1)
+ return 0;
+
+ /* Last fragment arrived, time to dispatch the signal */
+ dist_seq_rbt_delete(&dep->sequences, seq);
+ ctl_len = seq->ctl_len;
+
+        /* Now that we no longer need the DistSeqNode we reuse its heap
+           fragment to decode the ctl message into. The ctl message does
+           not have to be in the heap fragment, but we decode into it
+           speculatively in case there is a trace token that we need. */
+ erts_factory_heap_frag_init(&factory, &seq->hfrag);
+ edep = erts_get_dist_ext(&seq->hfrag);
+ ede_hfrag = &seq->hfrag;
+
+        /* If the sequence consisted of more than one fragment we create
+           one large binary out of all of the fragments, as erts_decode_ext
+           cannot handle a segmented buffer.
+           TODO: Move this copy to as late as possible, preferably into
+           erts_decode_dist_ext in the receiving process.
+        */
+ if (edep->data->frag_id > 1) {
+ Uint sz = 0;
+ Binary *bin;
+ int i;
+ byte *ep;
+
+ for (i = 0; i < edep->data->frag_id; i++)
+ sz += edep->data[i].ext_endp - edep->data[i].extp;
+
+ bin = erts_bin_nrml_alloc(sz);
+ ep = (byte*)bin->orig_bytes;
+
+ for (i = 0; i < edep->data->frag_id; i++) {
+ sys_memcpy(ep, edep->data[i].extp, edep->data[i].ext_endp - edep->data[i].extp);
+ ep += edep->data[i].ext_endp - edep->data[i].extp;
+ erts_bin_release(edep->data[i].binp);
+ edep->data[i].binp = NULL;
+ edep->data[i].extp = NULL;
+ edep->data[i].ext_endp = NULL;
+ }
+
+ edep->data->frag_id = 1;
+ edep->data->extp = (byte*)bin->orig_bytes;
+ edep->data->ext_endp = ep;
+ edep->data->binp = bin;
+ }
+
break;
+ }
default:
ERTS_INTERNAL_ERROR("Unexpected result from erts_prepare_dist_ext()");
break;
}
- if (ctl_len > DIST_CTL_DEFAULT_SIZE) {
- ctl = erts_alloc(ERTS_ALC_T_DCTRL_BUF, ctl_len * sizeof(Eterm));
- }
- hp = ctl;
-
- erts_factory_tmp_init(&factory, ctl, ctl_len, ERTS_ALC_T_DCTRL_BUF);
- arg = erts_decode_dist_ext(&factory, &ede);
+ arg = erts_decode_dist_ext(&factory, edep, 1);
if (is_non_value(arg)) {
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext(CTL) failed:\n");
+ erts_fprintf(dbg_file, "DIST MSG DEBUG: erts_decode_dist_ext(CTL) failed:\n");
bw(buf, orig_len);
#endif
PURIFY_MSG("data error");
goto decode_error;
}
-#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "<< CTL: %T\n", arg);
-#endif
+    /* Fill the unused part of the hfrag with a bignum header so that the
+       heap fragment stays walkable (the filler is skipped as one term) */
+ if (ede_hfrag && ede_hfrag->mem + ede_hfrag->used_size > factory.hp) {
+ Uint slot = factory.hp - ede_hfrag->mem;
+ ede_hfrag->mem[slot] = make_pos_bignum_header(ede_hfrag->used_size - slot - 1);
+ }
if (is_not_tuple(arg) ||
(tuple = tuple_val(arg), (tuple_arity = arityval(*tuple)) < 1) ||
is_not_small(tuple[1])) {
+#ifdef ERTS_DIST_MSG_DBG
+ if (is_tuple(arg) && arityval(*tuple) > 1)
+ erts_fprintf(dbg_file, "RECV: CTL: %s: %.80T\n",
+ erts_dop_to_string(unsigned_val(tuple[1])), arg);
+#endif
goto invalid_message;
}
- token_size = 0;
+#ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(dbg_file, "RECV: CTL: %s: %.80T\n",
+ erts_dop_to_string(unsigned_val(tuple[1])), arg);
+#endif
+
token = NIL;
switch (type = unsigned_val(tuple[1])) {
case DOP_LINK: {
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
if (tuple_arity != 3) {
@@ -1343,10 +1644,7 @@ int erts_net_message(Port *prt,
from, to);
ASSERT(ldp->a.other.item == to);
ASSERT(eq(ldp->b.other.item, from));
-#ifdef DEBUG
- code =
-#endif
- erts_link_dist_insert(&ldp->a, dep->mld);
+ code = erts_link_dist_insert(&ldp->a, dep->mld);
ASSERT(code);
if (erts_proc_sig_send_link(NULL, to, &ldp->b))
@@ -1354,17 +1652,14 @@ int erts_net_message(Port *prt,
/* Failed to send signal; cleanup and reply noproc... */
-#ifdef DEBUG
- code =
-#endif
- erts_link_dist_delete(&ldp->a);
+ code = erts_link_dist_delete(&ldp->a);
ASSERT(code);
erts_link_release_both(ldp);
}
- code = erts_dsig_prepare(&dsd, dep, NULL, 0, ERTS_DSP_NO_LOCK, 0, 0);
+ code = erts_dsig_prepare(&ctx, dep, NULL, 0, ERTS_DSP_NO_LOCK, 1, 1, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_exit(&dsd, to, from, am_noproc);
+ code = erts_dsig_send_exit(&ctx, to, from, am_noproc);
ASSERT(code == ERTS_DSIG_SEND_OK);
}
@@ -1397,7 +1692,7 @@ int erts_net_message(Port *prt,
/* A remote process wants to monitor us, we get:
{DOP_MONITOR_P, Remote pid, local pid or name, ref} */
Eterm pid, name;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
if (tuple_arity != 4) {
@@ -1452,10 +1747,9 @@ int erts_net_message(Port *prt,
}
- code = erts_dsig_prepare(&dsd, dep, NULL, 0, ERTS_DSP_NO_LOCK, 0, 0);
+ code = erts_dsig_prepare(&ctx, dep, NULL, 0, ERTS_DSP_NO_LOCK, 1, 1, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_m_exit(&dsd, watcher, watched, ref,
- am_noproc);
+ code = erts_dsig_send_m_exit(&ctx, watcher, watched, ref, am_noproc);
ASSERT(code == ERTS_DSIG_SEND_OK);
}
@@ -1514,7 +1808,6 @@ int erts_net_message(Port *prt,
goto invalid_message;
}
- token_size = size_object(tuple[5]);
/* Fall through ... */
case DOP_REG_SEND:
/* {DOP_REG_SEND, From, Cookie, ToName} -- Message */
@@ -1529,7 +1822,7 @@ int erts_net_message(Port *prt,
}
#ifdef ERTS_DIST_MSG_DBG
- dist_msg_dbg(&ede, "MSG", buf, orig_len);
+ dist_msg_dbg(edep, "MSG", buf, orig_len);
#endif
from = tuple[2];
@@ -1539,35 +1832,25 @@ int erts_net_message(Port *prt,
}
rp = erts_whereis_process(NULL, 0, to, 0, 0);
if (rp) {
- Uint xsize = (type == DOP_REG_SEND
- ? 0
- : ERTS_HEAP_FRAG_SIZE(token_size));
ErtsProcLocks locks = 0;
- ErtsDistExternal *ede_copy;
- ede_copy = erts_make_dist_ext_copy(&ede, xsize);
if (type == DOP_REG_SEND) {
token = NIL;
} else {
- ErlHeapFragment *heap_frag;
- ErlOffHeap *ohp;
- ASSERT(xsize);
- heap_frag = erts_dist_ext_trailer(ede_copy);
- ERTS_INIT_HEAP_FRAG(heap_frag, token_size, token_size);
- hp = heap_frag->mem;
- ohp = &heap_frag->off_heap;
token = tuple[5];
- token = copy_struct(token, token_size, &hp, ohp);
}
- erts_queue_dist_message(rp, locks, ede_copy, token, from);
+ erts_queue_dist_message(rp, locks, edep, ede_hfrag, token, from);
+
if (locks)
erts_proc_unlock(rp, locks);
- }
+ } else if (ede_hfrag != NULL) {
+ erts_free_dist_ext_copy(erts_get_dist_ext(ede_hfrag));
+ free_message_buffer(ede_hfrag);
+ }
break;
case DOP_SEND_SENDER_TT: {
- Uint xsize;
case DOP_SEND_TT:
if (tuple_arity != 4) {
@@ -1575,15 +1858,12 @@ int erts_net_message(Port *prt,
}
token = tuple[4];
- token_size = size_object(token);
- xsize = ERTS_HEAP_FRAG_SIZE(token_size);
goto send_common;
case DOP_SEND_SENDER:
case DOP_SEND:
token = NIL;
- xsize = 0;
if (tuple_arity != 3)
goto invalid_message;
@@ -1599,48 +1879,50 @@ int erts_net_message(Port *prt,
: tuple[2] == am_Empty);
#ifdef ERTS_DIST_MSG_DBG
- dist_msg_dbg(&ede, "MSG", buf, orig_len);
+ dist_msg_dbg(edep, "MSG", buf, orig_len);
#endif
to = tuple[3];
if (is_not_pid(to)) {
goto invalid_message;
}
rp = erts_proc_lookup(to);
+
if (rp) {
ErtsProcLocks locks = 0;
- ErtsDistExternal *ede_copy;
-
- ede_copy = erts_make_dist_ext_copy(&ede, xsize);
- if (is_not_nil(token)) {
- ErlHeapFragment *heap_frag;
- ErlOffHeap *ohp;
- ASSERT(xsize);
- heap_frag = erts_dist_ext_trailer(ede_copy);
- ERTS_INIT_HEAP_FRAG(heap_frag, token_size, token_size);
- hp = heap_frag->mem;
- ohp = &heap_frag->off_heap;
- token = copy_struct(token, token_size, &hp, ohp);
- }
- erts_queue_dist_message(rp, locks, ede_copy, token, am_Empty);
+ erts_queue_dist_message(rp, locks, edep, ede_hfrag, token, am_Empty);
if (locks)
erts_proc_unlock(rp, locks);
- }
+ } else if (ede_hfrag != NULL) {
+ erts_free_dist_ext_copy(erts_get_dist_ext(ede_hfrag));
+ free_message_buffer(ede_hfrag);
+ }
+
break;
}
+ case DOP_PAYLOAD_MONITOR_P_EXIT:
case DOP_MONITOR_P_EXIT: {
+
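+        /* The PAYLOAD variant carries the exit reason in the payload part
+         * of the signal rather than in the control tuple. */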
/* We are monitoring a process on the remote node which dies, we get
{DOP_MONITOR_P_EXIT, Remote pid or name, Local pid, ref, reason} */
-
- if (tuple_arity != 5) {
- goto invalid_message;
- }
- watched = tuple[2]; /* remote proc or name which died */
- watcher = tuple[3];
+ watched = tuple[2]; /* remote proc or name which died */
+ watcher = tuple[3];
ref = tuple[4];
- reason = tuple[5];
+
+ if (type == DOP_PAYLOAD_MONITOR_P_EXIT) {
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
+ reason = THE_NON_VALUE;
+ } else {
+ if (tuple_arity != 5) {
+ goto invalid_message;
+ }
+ reason = tuple[5];
+ edep = NULL;
+ }
if (is_not_ref(ref))
goto invalid_message;
@@ -1656,64 +1938,124 @@ int erts_net_message(Port *prt,
goto invalid_message;
}
- erts_proc_sig_send_dist_monitor_down(dep, ref, watched,
- watcher, reason);
+ if (!erts_proc_lookup(watcher)) {
+ if (ede_hfrag != NULL) {
+ erts_free_dist_ext_copy(erts_get_dist_ext(ede_hfrag));
+ free_message_buffer(ede_hfrag);
+ }
+ break; /* Process not alive */
+ }
+
+#ifdef ERTS_DIST_MSG_DBG
+ if (reason == THE_NON_VALUE) {
+ dist_msg_dbg(edep, "MSG", buf, orig_len);
+ }
+#endif
+
+ erts_proc_sig_send_dist_monitor_down(
+ dep, ref, watched, watcher, edep, ede_hfrag, reason);
break;
}
+ case DOP_PAYLOAD_EXIT:
+ case DOP_PAYLOAD_EXIT_TT:
case DOP_EXIT_TT:
case DOP_EXIT: {
+
/* 'from', which 'to' is linked to, died */
+ from = tuple[2];
+ to = tuple[3];
+
if (type == DOP_EXIT) {
if (tuple_arity != 4) {
goto invalid_message;
}
-
- from = tuple[2];
- to = tuple[3];
- reason = tuple[4];
token = NIL;
- } else {
+ reason = tuple[4];
+ edep = NULL;
+ } else if (type == DOP_EXIT_TT){
if (tuple_arity != 5) {
goto invalid_message;
}
- from = tuple[2];
- to = tuple[3];
token = tuple[4];
reason = tuple[5];
- }
+ edep = NULL;
+ } else if (type == DOP_PAYLOAD_EXIT) {
+ if (tuple_arity != 3) {
+ goto invalid_message;
+ }
+ token = NIL;
+ reason = THE_NON_VALUE;
+ } else {
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
+ token = tuple[4];
+ reason = THE_NON_VALUE;
+ }
if (is_not_external_pid(from)
|| dep != external_pid_dist_entry(from)
|| is_not_internal_pid(to)) {
goto invalid_message;
}
+ if (!erts_proc_lookup(to)) {
+ if (ede_hfrag != NULL) {
+ erts_free_dist_ext_copy(erts_get_dist_ext(ede_hfrag));
+ free_message_buffer(ede_hfrag);
+ }
+ break; /* Process not alive */
+ }
+
+#ifdef ERTS_DIST_MSG_DBG
+ if (reason == THE_NON_VALUE) {
+ dist_msg_dbg(edep, "MSG", buf, orig_len);
+ }
+#endif
+
erts_proc_sig_send_dist_link_exit(dep,
- from, to,
+ from, to, edep, ede_hfrag,
reason, token);
break;
}
+ case DOP_PAYLOAD_EXIT2_TT:
+ case DOP_PAYLOAD_EXIT2:
case DOP_EXIT2_TT:
- case DOP_EXIT2:
+ case DOP_EXIT2: {
+
/* 'from' is send an exit signal to 'to' */
+ from = tuple[2];
+ to = tuple[3];
+
if (type == DOP_EXIT2) {
if (tuple_arity != 4) {
goto invalid_message;
}
- from = tuple[2];
- to = tuple[3];
reason = tuple[4];
token = NIL;
- } else {
+ edep = NULL;
+ } else if (type == DOP_EXIT2_TT) {
if (tuple_arity != 5) {
goto invalid_message;
}
- from = tuple[2];
- to = tuple[3];
token = tuple[4];
reason = tuple[5];
- }
- if (is_not_pid(from)) {
+ edep = NULL;
+ } else if (type == DOP_PAYLOAD_EXIT2) {
+ if (tuple_arity != 3) {
+ goto invalid_message;
+ }
+ reason = THE_NON_VALUE;
+ token = NIL;
+ } else {
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
+ reason = THE_NON_VALUE;
+ token = tuple[4];
+ }
+ if (is_not_pid(from)
+ || dep != external_pid_dist_entry(from)) {
goto invalid_message;
}
if (is_not_internal_pid(to)) {
@@ -1725,9 +2067,23 @@ int erts_net_message(Port *prt,
goto invalid_message;
}
- erts_proc_sig_send_exit(NULL, from, to, reason, token, 0);
- break;
+ if (!erts_proc_lookup(to)) {
+ if (ede_hfrag != NULL) {
+ erts_free_dist_ext_copy(erts_get_dist_ext(ede_hfrag));
+ free_message_buffer(ede_hfrag);
+ }
+ break; /* Process not alive */
+ }
+
+#ifdef ERTS_DIST_MSG_DBG
+ if (reason == THE_NON_VALUE) {
+ dist_msg_dbg(edep, "MSG", buf, orig_len);
+ }
+#endif
+ erts_proc_sig_send_dist_exit(dep, from, to, edep, ede_hfrag, reason, token);
+ break;
+ }
case DOP_GROUP_LEADER:
if (tuple_arity != 3) {
goto invalid_message;
@@ -1741,13 +2097,15 @@ int erts_net_message(Port *prt,
(void) erts_proc_sig_send_group_leader(NULL, to, from, NIL);
break;
- default:
+ default:
goto invalid_message;
}
- erts_factory_close(&factory);
- if (ctl != ctl_default) {
- erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
+ if (ede_hfrag == NULL) {
+ erts_factory_close(&factory);
+ if (ctl != ctl_default) {
+ erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
+ }
}
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
ERTS_CHK_NO_PROC_LOCKS;
@@ -1760,9 +2118,14 @@ int erts_net_message(Port *prt,
}
decode_error:
PURIFY_MSG("data error");
- erts_factory_close(&factory);
- if (ctl != ctl_default) {
- erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
+ if (ede_hfrag == NULL) {
+ erts_factory_close(&factory);
+ if (ctl != ctl_default) {
+ erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
+ }
+ } else {
+ erts_free_dist_ext_copy(erts_get_dist_ext(ede_hfrag));
+ free_message_buffer(ede_hfrag);
}
data_error:
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
@@ -1771,18 +2134,21 @@ data_error:
return -1;
}
-static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy)
+static int dsig_send_exit(ErtsDSigSendContext *ctx, Eterm ctl, Eterm msg)
+{
+ ctx->ctl = ctl;
+ ctx->msg = msg;
+ return erts_dsig_send(ctx);
+}
+
+static int dsig_send_ctl(ErtsDSigSendContext *ctx, Eterm ctl)
{
- struct erts_dsig_send_context ctx;
int ret;
- ctx.ctl = ctl;
- ctx.msg = THE_NON_VALUE;
- ctx.force_busy = force_busy;
- ctx.phase = ERTS_DSIG_SEND_PHASE_INIT;
-#ifdef DEBUG
- ctx.reds = 1; /* provoke assert below (no reduction count without msg) */
-#endif
- ret = erts_dsig_send(dsdp, &ctx);
+ ctx->ctl = ctl;
+ ctx->msg = THE_NON_VALUE;
+ ctx->from = THE_NON_VALUE;
+ ctx->reds = 1; /* provoke assert below (no reduction count without msg) */
+ ret = erts_dsig_send(ctx);
ASSERT(ret != ERTS_DSIG_SEND_CONTINUE);
return ret;
}
@@ -1813,7 +2179,117 @@ notify_dist_data(Process *c_p, Eterm pid)
}
int
-erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
+erts_dsig_prepare(ErtsDSigSendContext *ctx,
+ DistEntry *dep,
+ Process *proc,
+ ErtsProcLocks proc_locks,
+ ErtsDSigPrepLock dspl,
+ int no_suspend,
+ int no_trap,
+ int connect)
+{
+ int res;
+
+ if (!erts_is_alive)
+ return ERTS_DSIG_PREP_NOT_ALIVE;
+ if (!dep) {
+ ASSERT(!connect);
+ return ERTS_DSIG_PREP_NOT_CONNECTED;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (connect) {
+ erts_proc_lc_might_unlock(proc, proc_locks);
+ }
+#endif
+
+retry:
+ erts_de_rlock(dep);
+
+ if (dep->state == ERTS_DE_STATE_CONNECTED) {
+ res = ERTS_DSIG_PREP_CONNECTED;
+ }
+ else if (dep->state == ERTS_DE_STATE_PENDING) {
+ res = ERTS_DSIG_PREP_PENDING;
+ }
+ else if (dep->state == ERTS_DE_STATE_EXITING) {
+ res = ERTS_DSIG_PREP_NOT_CONNECTED;
+ goto fail;
+ }
+ else if (connect) {
+ ASSERT(dep->state == ERTS_DE_STATE_IDLE);
+ erts_de_runlock(dep);
+ if (!erts_auto_connect(dep, proc, proc_locks)) {
+ return ERTS_DSIG_PREP_NOT_ALIVE;
+ }
+ goto retry;
+ }
+ else {
+ ASSERT(dep->state == ERTS_DE_STATE_IDLE);
+ res = ERTS_DSIG_PREP_NOT_CONNECTED;
+ goto fail;
+ }
+
+ if (no_suspend) {
+ if (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) {
+ res = ERTS_DSIG_PREP_WOULD_SUSPEND;
+ goto fail;
+ }
+ }
+
+ ctx->c_p = proc;
+ ctx->dep = dep;
+ ctx->deref_dep = 0;
+ ctx->cid = dep->cid;
+ ctx->connection_id = dep->connection_id;
+ ctx->no_suspend = no_suspend;
+ ctx->no_trap = no_trap;
+ ctx->flags = dep->flags;
+ ctx->return_term = am_true;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_INIT;
+ ctx->from = proc ? proc->common.id : am_undefined;
+ ctx->reds = no_trap ? 1 : (Sint) (ERTS_BIF_REDS_LEFT(proc) * TERM_TO_BINARY_LOOP_FACTOR);
+ if (dspl == ERTS_DSP_NO_LOCK)
+ erts_de_runlock(dep);
+ return res;
+
+ fail:
+ erts_de_runlock(dep);
+ return res;
+}
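/* Editorial sketch (not part of the patch): how the reworked prepare/send
 * API is meant to be driven. The helper below is hypothetical, but it
 * mirrors the erts_dsig_send_* wrappers in this file; dsig_send_ctl() is
 * the static helper defined earlier. */
static int
example_dsig_send_link(Process *c_p, DistEntry *dep,
                       Eterm local_pid, Eterm remote_pid)
{
    ErtsDSigSendContext ctx;
    Eterm ctl;
    int res = erts_dsig_prepare(&ctx, dep, c_p, ERTS_PROC_LOCK_MAIN,
                                ERTS_DSP_NO_LOCK,
                                0,  /* no_suspend: may suspend on a busy queue */
                                0,  /* no_trap: may yield and continue later */
                                1); /* connect: auto-connect an idle entry */
    if (res != ERTS_DSIG_PREP_CONNECTED && res != ERTS_DSIG_PREP_PENDING)
        return res;
    /* Control tuples are built in the scratch heap carried by the context. */
    ctl = TUPLE3(&ctx.ctl_heap[0], make_small(DOP_LINK),
                 local_pid, remote_pid);
    return dsig_send_ctl(&ctx, ctl);
}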
+
+static
+void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
+{
+ DistEntry *dep;
+ Eterm id;
+
+ if (prt) {
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ASSERT((erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLGS_DEAD) == 0);
+
+ dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ ASSERT(dep);
+ id = prt->common.id;
+ }
+ else {
+ ASSERT(dist_entry);
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx)
+ || erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx));
+ ASSERT(is_internal_port(dist_entry->cid));
+
+ dep = dist_entry;
+ id = dep->cid;
+ }
+
+ if (!erts_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
+ erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD);
+}
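/* Editorial note: erts_atomic_xchg_mb() above returns the previous value of
 * dist_cmd_scheduled, so concurrent callers schedule the port task at most
 * once until the flag is cleared again when the dist command runs. */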
+
+
+int
+erts_dsig_send(ErtsDSigSendContext *ctx)
{
int retval;
Sint initial_reds = ctx->reds;
@@ -1822,11 +2298,13 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
while (1) {
switch (ctx->phase) {
case ERTS_DSIG_SEND_PHASE_INIT:
- ctx->flags = dsdp->flags;
- ctx->c_p = dsdp->proc;
+            /* ctx->flags and ctx->c_p were already set by erts_dsig_prepare() */
- if (!ctx->c_p || dsdp->no_suspend)
- ctx->force_busy = 1;
+ if (!ctx->c_p) {
+ ctx->no_trap = 1;
+ ctx->no_suspend = 1;
+ }
ERTS_LC_ASSERT(!ctx->c_p
|| (ERTS_PROC_LOCK_MAIN
@@ -1845,15 +2323,27 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
}
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, ">> CTL: %T\n", ctx->ctl);
+ erts_fprintf(dbg_file, "SEND: CTL: %s: %.80T\n",
+ erts_dop_to_string(unsigned_val(tuple_val(ctx->ctl)[1])),
+ ctx->ctl);
if (is_value(ctx->msg))
- erts_fprintf(stderr, " MSG: %T\n", ctx->msg);
+ erts_fprintf(dbg_file, " MSG: %.160T\n", ctx->msg);
#endif
ctx->data_size = ctx->max_finalize_prepend;
erts_reset_atom_cache_map(ctx->acmp);
- erts_encode_dist_ext_size(ctx->ctl, ctx->flags, ctx->acmp, &ctx->data_size);
+ switch (erts_encode_dist_ext_size(ctx->ctl, ctx->flags,
+ ctx->acmp, &ctx->data_size)) {
+ case ERTS_EXT_SZ_OK:
+ break;
+ case ERTS_EXT_SZ_SYSTEM_LIMIT:
+ retval = ERTS_DSIG_SEND_TOO_LRG;
+ goto done;
+ case ERTS_EXT_SZ_YIELD:
+ ERTS_INTERNAL_ERROR("Unexpected yield result");
+ break;
+ }
if (is_non_value(ctx->msg)) {
ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
break;
@@ -1863,26 +2353,61 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
ctx->u.sc.level = 0;
ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_SIZE;
- case ERTS_DSIG_SEND_PHASE_MSG_SIZE:
- if (erts_encode_dist_ext_size_int(ctx->msg, ctx, &ctx->data_size)) {
- retval = ERTS_DSIG_SEND_CONTINUE;
- goto done;
- }
+ case ERTS_DSIG_SEND_PHASE_MSG_SIZE: {
+ ErtsExtSzRes sz_res;
+ sz_res = (!ctx->no_trap
+ ? erts_encode_dist_ext_size_ctx(ctx->msg,
+ ctx,
+ &ctx->data_size)
+ : erts_encode_dist_ext_size(ctx->msg,
+ ctx->flags,
+ ctx->acmp,
+ &ctx->data_size));
+ switch (sz_res) {
+ case ERTS_EXT_SZ_OK:
+ break;
+ case ERTS_EXT_SZ_SYSTEM_LIMIT:
+ retval = ERTS_DSIG_SEND_TOO_LRG;
+ goto done;
+ case ERTS_EXT_SZ_YIELD:
+ if (ctx->no_trap)
+ ERTS_INTERNAL_ERROR("Unexpected yield result");
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ goto done;
+ }
ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
+ }
case ERTS_DSIG_SEND_PHASE_ALLOC:
erts_finalize_atom_cache_map(ctx->acmp, ctx->flags);
- ctx->dhdr_ext_size = erts_encode_ext_dist_header_size(ctx->acmp);
- ctx->data_size += ctx->dhdr_ext_size;
+ if (ctx->flags & DFLAG_FRAGMENTS && is_value(ctx->msg) && is_not_immed(ctx->msg)) {
+ /* Calculate the max number of fragments that are needed */
+ ASSERT(is_pid(ctx->from) &&
+                       "from has to be a pid because it is used as the sequence id");
+ ctx->fragments = ctx->data_size / ERTS_DIST_FRAGMENT_SIZE + 1;
+ } else
+ ctx->fragments = 1;
+
+ ctx->dhdr_ext_size = erts_encode_ext_dist_header_size(ctx->acmp, ctx->fragments);
- ctx->obuf = alloc_dist_obuf(ctx->data_size);
- ctx->obuf->ext_endp = &ctx->obuf->data[0] + ctx->max_finalize_prepend + ctx->dhdr_ext_size;
+ ctx->obuf = alloc_dist_obuf(
+ ctx->dhdr_ext_size + ctx->data_size +
+ (ctx->fragments-1) * ERTS_DIST_FRAGMENT_HEADER_SIZE,
+ ctx->fragments);
+ ctx->obuf->ext_start = &ctx->obuf->extp[0];
+ ctx->obuf->ext_endp = &ctx->obuf->extp[0] + ctx->max_finalize_prepend
+ + ctx->dhdr_ext_size;
/* Encode internal version of dist header */
- ctx->obuf->extp = erts_encode_ext_dist_header_setup(ctx->obuf->ext_endp, ctx->acmp);
+ ctx->obuf->extp = erts_encode_ext_dist_header_setup(
+ ctx->obuf->ext_endp, ctx->acmp, ctx->fragments, ctx->from);
/* Encode control message */
erts_encode_dist_ext(ctx->ctl, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, NULL, NULL);
+
+ ctx->obuf->hdrp = NULL;
+ ctx->obuf->hdr_endp = NULL;
+
if (is_non_value(ctx->msg)) {
ctx->obuf->msg_start = NULL;
ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
@@ -1896,44 +2421,137 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_ENCODE;
case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
- if (erts_encode_dist_ext(ctx->msg, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, &ctx->u.ec, &ctx->reds)) {
- retval = ERTS_DSIG_SEND_CONTINUE;
- goto done;
- }
+ if (!ctx->no_trap) {
+ if (erts_encode_dist_ext(ctx->msg, &ctx->obuf->ext_endp, ctx->flags,
+ ctx->acmp, &ctx->u.ec, &ctx->reds)) {
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ goto done;
+ }
+ } else {
+ erts_encode_dist_ext(ctx->msg, &ctx->obuf->ext_endp, ctx->flags,
+ ctx->acmp, NULL, NULL);
+ }
- ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
case ERTS_DSIG_SEND_PHASE_FIN: {
- DistEntry *dep = dsdp->dep;
- int suspended = 0;
- int resume = 0;
ASSERT(ctx->obuf->extp < ctx->obuf->ext_endp);
- ASSERT(&ctx->obuf->data[0] <= ctx->obuf->extp - ctx->max_finalize_prepend);
- ASSERT(ctx->obuf->ext_endp <= &ctx->obuf->data[0] + ctx->data_size);
+            ASSERT(ctx->obuf->ext_start <= ctx->obuf->extp - ctx->max_finalize_prepend);
+            ASSERT(ctx->obuf->ext_endp <= ctx->obuf->ext_start + ctx->data_size + ctx->dhdr_ext_size);
ctx->data_size = ctx->obuf->ext_endp - ctx->obuf->extp;
ctx->obuf->hopefull_flags = ctx->u.ec.hopefull_flags;
- /*
+
+ if (ctx->fragments > 1) {
+ int fin_fragments;
+ int i;
+ byte *msg = ctx->obuf->msg_start,
+ *msg_end = ctx->obuf->ext_endp,
+ *hdrp = msg_end;
+
+ ASSERT((ctx->obuf->hopefull_flags & ctx->flags) == ctx->obuf->hopefull_flags);
+ ASSERT(get_int64(ctx->obuf->extp + 1 + 1 + 8) == ctx->fragments);
+
+                /* Now that encoding is done, we know how large the term will
+                   be, so we adjust the number of fragments to send. Note that
+                   this can mean that only one fragment is sent. */
+ fin_fragments = (ctx->obuf->ext_endp - ctx->obuf->msg_start + ERTS_DIST_FRAGMENT_SIZE-1) /
+ ERTS_DIST_FRAGMENT_SIZE - 1;
+
+ /* Update the frag_id in the DIST_FRAG_HEADER */
+ put_int64(fin_fragments+1, ctx->obuf->extp + 1 + 1 + 8);
+
+ if (fin_fragments > 0)
+ msg += ERTS_DIST_FRAGMENT_SIZE;
+ else
+ msg = msg_end;
+ ctx->obuf->next = &ctx->obuf[1];
+ ctx->obuf->ext_endp = msg;
+
+ /* Loop through all fragments, updating the output buffers
+ to be correct and also writing the DIST_FRAG_CONT header. */
+ for (i = 1; i < fin_fragments + 1; i++) {
+ ctx->obuf[i].hopefull_flags = 0;
+ ctx->obuf[i].extp = msg;
+ ctx->obuf[i].ext_start = msg;
+ if (msg + ERTS_DIST_FRAGMENT_SIZE > msg_end)
+ ctx->obuf[i].ext_endp = msg_end;
+ else {
+ msg += ERTS_DIST_FRAGMENT_SIZE;
+ ctx->obuf[i].ext_endp = msg;
+ }
+ ASSERT(ctx->obuf[i].ext_endp > ctx->obuf[i].extp);
+ ctx->obuf[i].hdrp = erts_encode_ext_dist_header_fragment(
+ &hdrp, fin_fragments - i + 1, ctx->from);
+ ctx->obuf[i].hdr_endp = hdrp;
+ ctx->obuf[i].next = &ctx->obuf[i+1];
+ }
+                /* If the initial fragment estimate was too high, we free the
+                   unused output buffers. */
+ for (; i < ctx->fragments; i++) {
+ free_dist_obuf(&ctx->obuf[i]);
+ }
+ if (!ctx->no_trap && !ctx->no_suspend)
+ ctx->reds -= ctx->fragments;
+ ctx->fragments = fin_fragments + 1;
+ }
+
+ ctx->phase = ERTS_DSIG_SEND_PHASE_SEND;
+
+ if (ctx->reds <= 0) {
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ goto done;
+ }
+ }
+ case ERTS_DSIG_SEND_PHASE_SEND: {
+ /*
* Signal encoded; now verify that the connection still exists,
             * and if so, enqueue the signal and schedule it for sending.
*/
- ctx->obuf->next = NULL;
+ DistEntry *dep = ctx->dep;
+ int suspended = 0;
+ int resume = 0;
+ int i;
erts_de_rlock(dep);
cid = dep->cid;
if (dep->state == ERTS_DE_STATE_EXITING
|| dep->state == ERTS_DE_STATE_IDLE
- || dep->connection_id != dsdp->connection_id) {
+ || dep->connection_id != ctx->connection_id) {
/* Not the same connection as when we started; drop message... */
erts_de_runlock(dep);
- free_dist_obuf(ctx->obuf);
+ for (i = 0; i < ctx->fragments; i++)
+ free_dist_obuf(&ctx->obuf[i]);
+ ctx->fragments = 0;
}
else {
- Sint qsize;
+ Sint qsize = erts_atomic_read_nob(&dep->qsize);
erts_aint32_t qflgs;
ErtsProcList *plp = NULL;
Eterm notify_proc = NIL;
- Sint obsz = size_obuf(ctx->obuf);
+ Sint obsz;
+ int fragments;
+
+                /* Calculate how many fragments to send. This depends on
+                   the available space in the distribution queue and the
+                   number of remaining reductions. */
+ for (fragments = 0, obsz = 0;
+ fragments < ctx->fragments &&
+ ((ctx->reds > 0 && (qsize + obsz) < erts_dist_buf_busy_limit) ||
+ ctx->no_trap || ctx->no_suspend);
+ fragments++) {
+#ifdef DEBUG
+ int reds = 100;
+#else
+ int reds = 10;
+#endif
+ if (!ctx->no_trap && !ctx->no_suspend)
+ ctx->reds -= reds;
+ obsz += size_obuf(&ctx->obuf[fragments]);
+ }
+
+ ASSERT(fragments == ctx->fragments ||
+ (!ctx->no_trap && !ctx->no_suspend));
erts_mtx_lock(&dep->qlock);
qsize = erts_atomic_add_read_nob(&dep->qsize, (erts_aint_t) obsz);
@@ -1954,7 +2572,7 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
/* else: requester will send itself the message... */
qflgs &= ~ERTS_DE_QFLG_REQ_INFO;
}
- if (!ctx->force_busy && (qflgs & ERTS_DE_QFLG_BUSY)) {
+ if (!ctx->no_suspend && (qflgs & ERTS_DE_QFLG_BUSY)) {
erts_mtx_unlock(&dep->qlock);
plp = erts_proclist_create(ctx->c_p);
@@ -1963,14 +2581,27 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
erts_mtx_lock(&dep->qlock);
}
- /* Enqueue obuf on dist entry */
- if (dep->out_queue.last)
- dep->out_queue.last->next = ctx->obuf;
- else
- dep->out_queue.first = ctx->obuf;
- dep->out_queue.last = ctx->obuf;
+ if (fragments > 1) {
+ if (!ctx->obuf->hdrp) {
+ ASSERT(get_int64(ctx->obuf->extp + 10) == ctx->fragments);
+ } else {
+ ASSERT(get_int64(ctx->obuf->hdrp + 10) == ctx->fragments);
+ }
+ }
+
+ if (fragments) {
+ ctx->obuf[fragments-1].next = NULL;
+ if (dep->out_queue.last)
+ dep->out_queue.last->next = ctx->obuf;
+ else
+ dep->out_queue.first = ctx->obuf;
+ dep->out_queue.last = &ctx->obuf[fragments-1];
+
+ ctx->fragments -= fragments;
+ ctx->obuf = &ctx->obuf[fragments];
+ }
- if (!ctx->force_busy) {
+ if (!ctx->no_suspend) {
qflgs = erts_atomic32_read_nob(&dep->qflgs);
if (!(qflgs & ERTS_DE_QFLG_BUSY)) {
if (suspended)
@@ -2017,6 +2648,13 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
* erroneously scheduled when it shouldn't be.
*/
}
+            /* More fragments are left to send; yield and re-schedule */
+ if (ctx->fragments) {
+ retval = ERTS_DSIG_SEND_CONTINUE;
+ if (!resume && erts_system_monitor_flags.busy_dist_port)
+ monitor_generic(ctx->c_p, am_busy_dist_port, cid);
+ goto done;
+ }
}
ctx->obuf = NULL;
@@ -2101,9 +2739,9 @@ static Uint
dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
{
int fpe_was_unmasked;
- ErlDrvSizeT size;
- SysIOVec iov[2];
- ErlDrvBinary* bv[2];
+ ErlDrvSizeT size = 0;
+ SysIOVec iov[3];
+ ErlDrvBinary* bv[3];
ErlIOVec eiov;
ERTS_CHK_NO_PROC_LOCKS;
@@ -2118,12 +2756,31 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
eiov.vsize = 1;
}
else {
- size = obuf->ext_endp - obuf->extp;
+ int i = 1;
eiov.vsize = 2;
- iov[1].iov_base = obuf->extp;
- iov[1].iov_len = size;
- bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
+ if (obuf->hdrp) {
+ eiov.vsize = 3;
+ iov[i].iov_base = obuf->hdrp;
+ iov[i].iov_len = obuf->hdr_endp - obuf->hdrp;
+ size += iov[i].iov_len;
+ bv[i] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
+#ifdef ERTS_RAW_DIST_MSG_DBG
+ erts_fprintf(dbg_file, "SEND: ");
+ bw(iov[i].iov_base, iov[i].iov_len);
+#endif
+ i++;
+
+ }
+
+ iov[i].iov_base = obuf->extp;
+ iov[i].iov_len = obuf->ext_endp - obuf->extp;
+#ifdef ERTS_RAW_DIST_MSG_DBG
+ erts_fprintf(dbg_file, "SEND: ");
+ bw(iov[i].iov_base, iov[i].iov_len);
+#endif
+ size += iov[i].iov_len;
+ bv[i] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
}
eiov.size = size;
@@ -2230,6 +2887,23 @@ erts_dist_command(Port *prt, int initial_reds)
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
+#ifdef DEBUG
+ {
+ Uint sz = 0;
+ ErtsDistOutputBuf *curr = oq.first;
+ while (curr) {
+ sz += size_obuf(curr);
+ curr = curr->next;
+ }
+ curr = foq.first;
+ while (curr) {
+ sz += size_obuf(curr);
+ curr = curr->next;
+ }
+ ASSERT(sz <= erts_atomic_read_nob(&dep->qsize));
+ }
+#endif
+
sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (reds < 0)
@@ -2243,10 +2917,6 @@ erts_dist_command(Port *prt, int initial_reds)
size = (*send)(prt, foq.first);
erts_atomic64_inc_nob(&dep->out);
esdp->io.out += (Uint64) size;
-#ifdef ERTS_RAW_DIST_MSG_DBG
- erts_fprintf(stderr, ">> ");
- bw(foq.first->extp, size);
-#endif
reds -= ERTS_PORT_REDS_DIST_CMD_DATA(size);
fob = foq.first;
obufsize += size_obuf(fob);
@@ -2271,7 +2941,9 @@ erts_dist_command(Port *prt, int initial_reds)
ob = oq.first;
ASSERT(ob);
do {
+ obufsize += size_obuf(ob);
reds = erts_encode_ext_dist_header_finalize(ob, dep, flags, reds);
+ obufsize -= size_obuf(ob);
if (reds < 0)
break;
last_finalized = ob;
@@ -2310,20 +2982,18 @@ erts_dist_command(Port *prt, int initial_reds)
while (oq.first && !preempt) {
ErtsDistOutputBuf *fob;
Uint size;
+ obufsize += size_obuf(oq.first);
reds = erts_encode_ext_dist_header_finalize(oq.first, dep, flags, reds);
+ obufsize -= size_obuf(oq.first);
if (reds < 0) {
preempt = 1;
break;
}
- ASSERT(&oq.first->data[0] <= oq.first->extp
- && oq.first->extp <= oq.first->ext_endp);
+ ASSERT(oq.first->bin->orig_bytes <= (char*)oq.first->extp
+ && oq.first->extp <= oq.first->ext_endp);
size = (*send)(prt, oq.first);
erts_atomic64_inc_nob(&dep->out);
esdp->io.out += (Uint64) size;
-#ifdef ERTS_RAW_DIST_MSG_DBG
- erts_fprintf(stderr, ">> ");
- bw(oq.first->extp, size);
-#endif
reds -= ERTS_PORT_REDS_DIST_CMD_DATA(size);
fob = oq.first;
obufsize += size_obuf(fob);
@@ -2460,100 +3130,6 @@ erts_dist_command(Port *prt, int initial_reds)
goto done;
}
-#if 0
-
-int
-dist_data_finalize(Process *c_p, int reds_limit)
-{
- int reds = 5;
- DistEntry *dep = ;
- ErtsDistOutputQueue oq, foq;
- ErtsDistOutputBuf *ob;
- int preempt;
-
-
- erts_mtx_lock(&dep->qlock);
- flags = dep->flags;
- oq.first = dep->out_queue.first;
- oq.last = dep->out_queue.last;
- dep->out_queue.first = NULL;
- dep->out_queue.last = NULL;
- erts_mtx_unlock(&dep->qlock);
-
- if (!oq.first) {
- ASSERT(!oq.last);
- oq.first = dep->tmp_out_queue.first;
- oq.last = dep->tmp_out_queue.last;
- }
- else {
- ErtsDistOutputBuf *f, *l;
- ASSERT(oq.last);
- if (dep->tmp_out_queue.last) {
- dep->tmp_out_queue.last->next = oq.first;
- oq.first = dep->tmp_out_queue.first;
- }
- }
-
- if (!oq.first) {
- /* Nothing to do... */
- ASSERT(!oq.last);
- return reds;
- }
-
- foq.first = dep->finalized_out_queue.first;
- foq.last = dep->finalized_out_queue.last;
-
- preempt = 0;
- ob = oq.first;
- ASSERT(ob);
-
- do {
- ob->extp = erts_encode_ext_dist_header_finalize(ob->extp,
- dep->cache,
- flags);
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
- *--ob->extp = PASS_THROUGH; /* Old node; 'pass through'
- needed */
- ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp);
- reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
- preempt = reds > reds_limit;
- if (preempt)
- break;
- ob = ob->next;
- } while (ob);
- /*
- * At least one buffer was finalized; if we got preempted,
- * ob points to the last buffer that we finalized.
- */
- if (foq.last)
- foq.last->next = oq.first;
- else
- foq.first = oq.first;
- if (!preempt) {
- /* All buffers finalized */
- foq.last = oq.last;
- oq.first = oq.last = NULL;
- }
- else {
- /* Not all buffers finalized; split oq. */
- foq.last = ob;
- oq.first = ob->next;
- if (oq.first)
- ob->next = NULL;
- else
- oq.last = NULL;
- }
-
- dep->finalized_out_queue.first = foq.first;
- dep->finalized_out_queue.last = foq.last;
- dep->tmp_out_queue.first = oq.first;
- dep->tmp_out_queue.last = oq.last;
-
- return reds;
-}
-
-#endif
-
BIF_RETTYPE
dist_ctrl_get_data_notification_1(BIF_ALIST_1)
{
@@ -2622,6 +3198,7 @@ dist_ctrl_put_data_2(BIF_ALIST_2)
ErlDrvSizeT size;
Eterm input_handler;
Uint32 conn_id;
+ Binary *bin = NULL;
if (is_binary(BIF_ARG_2))
size = binary_size(BIF_ARG_2);
@@ -2647,13 +3224,27 @@ dist_ctrl_put_data_2(BIF_ALIST_2)
if (size != 0) {
byte *data, *temp_alloc = NULL;
- data = (byte *) erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc);
+ if (binary_bitoffset(BIF_ARG_2))
+ data = (byte *) erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc);
+ else {
+ Eterm real_bin;
+ ProcBin *proc_bin;
+ Uint offset, bitoffs, bitsize;
+
+ ERTS_GET_REAL_BIN(BIF_ARG_2, real_bin, offset, bitoffs, bitsize);
+ ASSERT(bitoffs == 0);
+ data = binary_bytes(real_bin) + offset;
+ proc_bin = (ProcBin *)binary_val(real_bin);
+ if (proc_bin->thing_word == HEADER_PROC_BIN)
+ bin = proc_bin->val;
+ }
+
if (!data)
BIF_ERROR(BIF_P, BADARG);
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- (void) erts_net_message(NULL, dep, conn_id, NULL, 0, data, size);
+ (void) erts_net_message(NULL, dep, conn_id, NULL, 0, bin, data, size);
/*
* We ignore any decode failures. On fatal failures the
* connection will be taken down by killing the
@@ -2672,6 +3263,86 @@ dist_ctrl_put_data_2(BIF_ALIST_2)
}
BIF_RETTYPE
+dist_ctrl_set_opt_3(BIF_ALIST_3)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ Uint32 conn_id;
+ BIF_RETTYPE ret;
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_de_rlock(dep);
+
+ if (dep->connection_id != conn_id)
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ else {
+
+ switch (BIF_ARG_2) {
+ case am_get_size:
+ ERTS_BIF_PREP_RET(ret, (dep->opts & ERTS_DIST_CTRL_OPT_GET_SIZE
+ ? am_true
+ : am_false));
+ if (BIF_ARG_3 == am_true)
+ dep->opts |= ERTS_DIST_CTRL_OPT_GET_SIZE;
+ else if (BIF_ARG_3 == am_false)
+ dep->opts &= ~ERTS_DIST_CTRL_OPT_GET_SIZE;
+ else
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ break;
+ }
+
+ }
+
+ erts_de_runlock(dep);
+
+ return ret;
+}
+
+BIF_RETTYPE
+dist_ctrl_get_opt_2(BIF_ALIST_2)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ Uint32 conn_id;
+ BIF_RETTYPE ret;
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_de_rlock(dep);
+
+ if (dep->connection_id != conn_id)
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ else {
+
+ switch (BIF_ARG_2) {
+ case am_get_size:
+ ERTS_BIF_PREP_RET(ret, (dep->opts & ERTS_DIST_CTRL_OPT_GET_SIZE
+ ? am_true
+ : am_false));
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ break;
+ }
+
+ }
+
+ erts_de_runlock(dep);
+
+ return ret;
+}
+
+BIF_RETTYPE
dist_get_stat_1(BIF_ALIST_1)
{
Sint64 read, write, pend;
@@ -2746,12 +3417,13 @@ dist_ctrl_get_data_1(BIF_ALIST_1)
{
DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
const Sint initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
- Sint reds = initial_reds;
+ Sint reds = initial_reds, obufsize = 0;
ErtsDistOutputBuf *obuf;
- Eterm *hp;
+ Eterm *hp, res;
ProcBin *pb;
erts_aint_t qsize;
- Uint32 conn_id;
+ Uint32 conn_id, get_size;
+ Uint hsz = 0, bin_sz;
if (!dep)
BIF_ERROR(BIF_P, EXC_NOTSUP);
@@ -2803,9 +3475,13 @@ dist_ctrl_get_data_1(BIF_ALIST_1)
}
obuf = dep->tmp_out_queue.first;
+ obufsize += size_obuf(obuf);
reds = erts_encode_ext_dist_header_finalize(obuf, dep, dep->flags, reds);
+ obufsize -= size_obuf(obuf);
if (reds < 0) {
erts_de_runlock(dep);
+ if (obufsize)
+ erts_atomic_add_nob(&dep->qsize, (erts_aint_t) -obufsize);
ERTS_BIF_YIELD1(bif_export[BIF_dist_ctrl_get_data_1],
BIF_P, BIF_ARG_1);
}
@@ -2819,17 +3495,60 @@ dist_ctrl_get_data_1(BIF_ALIST_1)
erts_de_runlock(dep);
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- pb = (ProcBin *) (char *) hp;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = obuf->ext_endp - obuf->extp;
- pb->next = MSO(BIF_P).first;
- MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
- pb->val = ErtsDistOutputBuf2Binary(obuf);
- pb->bytes = (byte*) obuf->extp;
- pb->flags = 0;
-
- qsize = erts_atomic_add_read_nob(&dep->qsize, -size_obuf(obuf));
+ bin_sz = obuf->ext_endp - obuf->extp + obuf->hdr_endp - obuf->hdrp;
+
+ get_size = dep->opts & ERTS_DIST_CTRL_OPT_GET_SIZE;
+ if (get_size) {
+        hsz += 3; /* 2-tuple */
+ if (!IS_USMALL(0, bin_sz))
+ hsz += BIG_UINT_HEAP_SIZE;
+ }
+
+ if (!obuf->hdrp) {
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE + hsz);
+ pb = (ProcBin *) (char *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = obuf->ext_endp - obuf->extp;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
+ pb->val = ErtsDistOutputBuf2Binary(obuf);
+ pb->bytes = (byte*) obuf->extp;
+ pb->flags = 0;
+ res = make_binary(pb);
+ hp += PROC_BIN_SIZE;
+ } else {
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE * 2 + 4 + hsz);
+ pb = (ProcBin *) (char *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = obuf->ext_endp - obuf->extp;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
+ pb->val = ErtsDistOutputBuf2Binary(obuf);
+ pb->bytes = (byte*) obuf->extp;
+ pb->flags = 0;
+ hp += PROC_BIN_SIZE;
+
+ res = CONS(hp, make_binary(pb), NIL);
+ hp += 2;
+
+ pb = (ProcBin *) (char *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = obuf->hdr_endp - obuf->hdrp;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
+ pb->val = ErtsDistOutputBuf2Binary(obuf);
+ erts_refc_inc(&pb->val->intern.refc, 1);
+ pb->bytes = (byte*) obuf->hdrp;
+ pb->flags = 0;
+ hp += PROC_BIN_SIZE;
+ res = CONS(hp, make_binary(pb), res);
+ hp += 2;
+ }
+
+ obufsize += size_obuf(obuf);
+
+ qsize = erts_atomic_add_read_nob(&dep->qsize, (erts_aint_t) -obufsize);
+
ASSERT(qsize >= 0);
if (qsize < erts_dist_buf_busy_limit/2
@@ -2844,7 +3563,18 @@ dist_ctrl_get_data_1(BIF_ALIST_1)
}
}
- BIF_RET2(make_binary(pb), (initial_reds - reds));
+ if (get_size) {
+ Eterm sz_term;
+ if (IS_USMALL(0, bin_sz))
+ sz_term = make_small(bin_sz);
+ else {
+ sz_term = uint_to_big(bin_sz, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ res = TUPLE2(hp, sz_term, res);
+ }
+
+ BIF_RET2(res, (initial_reds - reds));
}
void
@@ -2887,6 +3617,10 @@ static void kill_connection(DistEntry *dep)
void
erts_kill_dist_connection(DistEntry *dep, Uint32 conn_id)
{
+#ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(dbg_file, "INTR: kill dist conn to %T:%u\n",
+ dep->sysname, conn_id);
+#endif
erts_de_rwlock(dep);
if (conn_id == dep->connection_id
&& dep->state == ERTS_DE_STATE_CONNECTED) {
@@ -2901,7 +3635,7 @@ struct print_to_data {
void *arg;
};
-static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
+static int doit_print_monitor_info(ErtsMonitor *mon, void *vptdp, Sint reds)
{
fmtfn_t to = ((struct print_to_data *) vptdp)->to;
void *arg = ((struct print_to_data *) vptdp)->arg;
@@ -2924,6 +3658,7 @@ static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
else
erts_print(to, arg, "%T\n", mdep->md.origin.other.item);
}
+ return 1;
}
static void print_monitor_info(fmtfn_t to, void *arg, DistEntry *dep)
@@ -2939,12 +3674,13 @@ static void print_monitor_info(fmtfn_t to, void *arg, DistEntry *dep)
}
}
-static void doit_print_link_info(ErtsLink *lnk, void *vptdp)
+static int doit_print_link_info(ErtsLink *lnk, void *vptdp, Sint reds)
{
struct print_to_data *ptdp = vptdp;
ErtsLink *lnk2 = erts_link_to_other(lnk, NULL);
erts_print(ptdp->to, ptdp->arg, "Remote link: %T %T\n",
lnk2->other.item, lnk->other.item);
+ return 1;
}
static void print_link_info(fmtfn_t to, void *arg, DistEntry *dep)
@@ -3546,8 +4282,9 @@ Sint erts_abort_connection_rwunlock(DistEntry* dep)
erts_set_dist_entry_not_connected(dep);
erts_de_rwunlock(dep);
- schedule_con_monitor_link_cleanup(mld, THE_NON_VALUE,
- THE_NON_VALUE, THE_NON_VALUE);
+ schedule_con_monitor_link_seq_cleanup(
+ mld, NULL, THE_NON_VALUE,
+ THE_NON_VALUE, THE_NON_VALUE);
if (resume_procs) {
int resumed = erts_resume_processes(resume_procs);
@@ -3853,16 +4590,16 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
}
case am_true: {
- ErtsDSigData dsd;
- dsd.node = Node;
+ ErtsDSigSendContext ctx;
+ ctx.node = Node;
dep = erts_find_or_insert_dist_entry(Node);
if (dep == erts_this_dist_entry)
break;
- switch (erts_dsig_prepare(&dsd, dep, p,
+ switch (erts_dsig_prepare(&ctx, dep, p,
ERTS_PROC_LOCK_MAIN,
- ERTS_DSP_RLOCK, 0, async_connect)) {
+ ERTS_DSP_RLOCK, 0, 0, async_connect)) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
/* Trap to either send 'nodedown' or do passive connection attempt */
@@ -3889,28 +4626,22 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
Node);
mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
if (created) {
-#ifdef DEBUG
int inserted =
-#endif
erts_monitor_dist_insert(&mdep->md.target, dep->mld);
- ASSERT(inserted);
+ ASSERT(inserted); (void)inserted;
ASSERT(mdep->dist->connection_id == dep->connection_id);
}
else if (mdep->dist->connection_id != dep->connection_id) {
ErtsMonitorDataExtended *mdep2;
ErtsMonitor *mon2;
-#ifdef DEBUG
int inserted;
-#endif
mdep2 = ((ErtsMonitorDataExtended *)
erts_monitor_create(ERTS_MON_TYPE_NODE, NIL,
p->common.id, Node, NIL));
mon2 = &mdep2->md.origin;
-#ifdef DEBUG
inserted =
-#endif
erts_monitor_dist_insert(&mdep->md.target, dep->mld);
- ASSERT(inserted);
+ ASSERT(inserted); (void)inserted;
ASSERT(mdep2->dist->connection_id == dep->connection_id);
mdep2->uptr.node_monitors = mdep->uptr.node_monitors;
@@ -4166,8 +4897,8 @@ typedef struct {
Uint i;
} ErtsNodesMonitorContext;
-static void
-save_nodes_monitor(ErtsMonitor *mon, void *vctxt)
+static int
+save_nodes_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
{
ErtsNodesMonitorContext *ctxt = vctxt;
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
@@ -4179,6 +4910,7 @@ save_nodes_monitor(ErtsMonitor *mon, void *vctxt)
ctxt->nmdp[ctxt->i].options = mdp->origin.other.item;
ctxt->i++;
+ return 1;
}
static void
@@ -4311,8 +5043,8 @@ typedef struct {
} ErtsNodesMonitorInfoContext;
-static void
-nodes_monitor_info(ErtsMonitor *mon, void *vctxt)
+static int
+nodes_monitor_info(ErtsMonitor *mon, void *vctxt, Sint reds)
{
ErtsMonitorDataExtended *mdep;
ErtsNodesMonitorInfoContext *ctxt = vctxt;
@@ -4359,6 +5091,7 @@ nodes_monitor_info(ErtsMonitor *mon, void *vctxt)
ctxt->hpp = hpp;
ctxt->szp = szp;
ctxt->res = res;
+ return 1;
}
Eterm
@@ -4413,3 +5146,22 @@ erts_processes_monitoring_nodes(Process *c_p)
return ctxt.res;
}
+
+static void
+print_suspended_on_de(fmtfn_t to, void *to_arg, DistEntry *dep)
+{
+ for (; dep; dep = dep->next) {
+ ErtsProcList *curr = erts_proclist_peek_first(dep->suspended);
+ while (curr) {
+ if (!is_internal_pid(curr->u.pid))
+ print_process_info(to, to_arg, curr->u.p, 0);
+ curr = erts_proclist_peek_next(dep->suspended, curr);
+ }
+ }
+}
+
+void
+erts_dist_print_procs_suspended_on_de(fmtfn_t to, void *to_arg) {
+ print_suspended_on_de(to, to_arg, erts_hidden_dist_entries);
+ print_suspended_on_de(to, to_arg, erts_visible_dist_entries);
+}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index d4d7874a70..067028634b 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -47,6 +47,8 @@
#define DFLAG_SEND_SENDER 0x80000
#define DFLAG_BIG_SEQTRACE_LABELS 0x100000
#define DFLAG_NO_MAGIC 0x200000 /* internal for pending connection */
+#define DFLAG_EXIT_PAYLOAD 0x400000
+#define DFLAG_FRAGMENTS 0x800000
/* Mandatory flags for distribution */
#define DFLAG_DIST_MANDATORY (DFLAG_EXTENDED_REFERENCES \
@@ -75,7 +77,9 @@
| DFLAG_MAP_TAG \
| DFLAG_BIG_CREATION \
| DFLAG_SEND_SENDER \
- | DFLAG_BIG_SEQTRACE_LABELS)
+ | DFLAG_BIG_SEQTRACE_LABELS \
+ | DFLAG_EXIT_PAYLOAD \
+ | DFLAG_FRAGMENTS)
/* Flags addable by local distr implementations */
#define DFLAG_DIST_ADDABLE DFLAG_DIST_DEFAULT
@@ -99,26 +103,35 @@
| DFLAG_BIG_CREATION)
/* opcodes used in distribution messages */
-#define DOP_LINK 1
-#define DOP_SEND 2
-#define DOP_EXIT 3
-#define DOP_UNLINK 4
+enum dop {
+ DOP_LINK = 1,
+ DOP_SEND = 2,
+ DOP_EXIT = 3,
+ DOP_UNLINK = 4,
/* Ancient DOP_NODE_LINK (5) was here, can be reused */
-#define DOP_REG_SEND 6
-#define DOP_GROUP_LEADER 7
-#define DOP_EXIT2 8
-
-#define DOP_SEND_TT 12
-#define DOP_EXIT_TT 13
-#define DOP_REG_SEND_TT 16
-#define DOP_EXIT2_TT 18
-
-#define DOP_MONITOR_P 19
-#define DOP_DEMONITOR_P 20
-#define DOP_MONITOR_P_EXIT 21
-
-#define DOP_SEND_SENDER 22
-#define DOP_SEND_SENDER_TT 23
+ DOP_REG_SEND = 6,
+ DOP_GROUP_LEADER = 7,
+ DOP_EXIT2 = 8,
+
+ DOP_SEND_TT = 12,
+ DOP_EXIT_TT = 13,
+ DOP_REG_SEND_TT = 16,
+ DOP_EXIT2_TT = 18,
+
+ DOP_MONITOR_P = 19,
+ DOP_DEMONITOR_P = 20,
+ DOP_MONITOR_P_EXIT = 21,
+
+ DOP_SEND_SENDER = 22,
+ DOP_SEND_SENDER_TT = 23,
+
+    /* These are used when the DFLAG_EXIT_PAYLOAD capability has been negotiated */
+ DOP_PAYLOAD_EXIT = 24,
+ DOP_PAYLOAD_EXIT_TT = 25,
+ DOP_PAYLOAD_EXIT2 = 26,
+ DOP_PAYLOAD_EXIT2_TT = 27,
+ DOP_PAYLOAD_MONITOR_P_EXIT = 28
+};
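/* Editorial note (inferred from the arity checks in erts_net_message): the
 * DOP_PAYLOAD_* variants move the exit reason out of the control tuple and
 * into the externally encoded payload, e.g.
 *
 *     {DOP_EXIT, FromPid, ToPid, Reason}      control arity 4
 *     {DOP_PAYLOAD_EXIT, FromPid, ToPid}      control arity 3,
 *                                             Reason sent as payload
 *
 * which lets a large exit reason be fragmented like an ordinary message. */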
/* distribution trap functions */
extern Export* dmonitor_node_trap;
@@ -129,15 +142,15 @@ typedef enum {
} ErtsDSigPrepLock;
-typedef struct {
- Process *proc;
- DistEntry *dep;
- Eterm node; /* used if dep == NULL */
- Eterm cid;
- Eterm connection_id;
- int no_suspend;
- Uint32 flags;
-} ErtsDSigData;
+/* Must be larger than or equal to 16 */
+#ifdef DEBUG
+#define ERTS_DIST_FRAGMENT_SIZE 16
+#else
+/* This should be made configurable */
+#define ERTS_DIST_FRAGMENT_SIZE (64 * 1024)
+#endif
+
+#define ERTS_DIST_FRAGMENT_HEADER_SIZE (1 + 1 + 8 + 8) /* magic, header, seq id, frag id */
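/* Editorial sketch: the initial upper-bound fragment estimate used in the
 * ALLOC phase of erts_dsig_send() in dist.c; the count is trimmed down once
 * the payload has actually been encoded. Function name is illustrative. */
static ERTS_INLINE Uint
example_initial_fragments(Uint payload_size_estimate)
{
    return payload_size_estimate / ERTS_DIST_FRAGMENT_SIZE + 1;
}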
#define ERTS_DE_BUSY_LIMIT (1024*1024)
extern int erts_dist_buf_busy_limit;
@@ -159,123 +172,8 @@ extern int erts_is_alive;
/* Pending connection; signals can be enqueued */
#define ERTS_DSIG_PREP_PENDING 4
-ERTS_GLB_INLINE int erts_dsig_prepare(ErtsDSigData *,
- DistEntry*,
- Process *,
- ErtsProcLocks,
- ErtsDSigPrepLock,
- int,
- int);
-
-ERTS_GLB_INLINE
-void erts_schedule_dist_command(Port *, DistEntry *);
-
-int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE int
-erts_dsig_prepare(ErtsDSigData *dsdp,
- DistEntry *dep,
- Process *proc,
- ErtsProcLocks proc_locks,
- ErtsDSigPrepLock dspl,
- int no_suspend,
- int connect)
-{
- int res;
-
- if (!erts_is_alive)
- return ERTS_DSIG_PREP_NOT_ALIVE;
- if (!dep) {
- ASSERT(!connect);
- return ERTS_DSIG_PREP_NOT_CONNECTED;
- }
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (connect) {
- erts_proc_lc_might_unlock(proc, proc_locks);
- }
-#endif
-
-retry:
- erts_de_rlock(dep);
-
- if (dep->state == ERTS_DE_STATE_CONNECTED) {
- res = ERTS_DSIG_PREP_CONNECTED;
- }
- else if (dep->state == ERTS_DE_STATE_PENDING) {
- res = ERTS_DSIG_PREP_PENDING;
- }
- else if (dep->state == ERTS_DE_STATE_EXITING) {
- res = ERTS_DSIG_PREP_NOT_CONNECTED;
- goto fail;
- }
- else if (connect) {
- ASSERT(dep->state == ERTS_DE_STATE_IDLE);
- erts_de_runlock(dep);
- if (!erts_auto_connect(dep, proc, proc_locks)) {
- return ERTS_DSIG_PREP_NOT_ALIVE;
- }
- goto retry;
- }
- else {
- ASSERT(dep->state == ERTS_DE_STATE_IDLE);
- res = ERTS_DSIG_PREP_NOT_CONNECTED;
- goto fail;
- }
-
- if (no_suspend) {
- if (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) {
- res = ERTS_DSIG_PREP_WOULD_SUSPEND;
- goto fail;
- }
- }
- dsdp->proc = proc;
- dsdp->dep = dep;
- dsdp->cid = dep->cid;
- dsdp->connection_id = dep->connection_id;
- dsdp->no_suspend = no_suspend;
- dsdp->flags = dep->flags;
- if (dspl == ERTS_DSP_NO_LOCK)
- erts_de_runlock(dep);
- return res;
-
- fail:
- erts_de_runlock(dep);
- return res;
-}
-
-ERTS_GLB_INLINE
-void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
-{
- DistEntry *dep;
- Eterm id;
-
- if (prt) {
- ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
- ASSERT((erts_atomic32_read_nob(&prt->state)
- & ERTS_PORT_SFLGS_DEAD) == 0);
-
- dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
- ASSERT(dep);
- id = prt->common.id;
- }
- else {
- ASSERT(dist_entry);
- ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx)
- || erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx));
- ASSERT(is_internal_port(dist_entry->cid));
-
- dep = dist_entry;
- id = dep->cid;
- }
-
- if (!erts_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
- erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD);
-}
-
-#endif
+/* dist_ctrl_set_opt/3 and dist_ctrl_get_opt/2 */
+#define ERTS_DIST_CTRL_OPT_GET_SIZE ((Uint32) (1 << 0))
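/* Editorial note: when ERTS_DIST_CTRL_OPT_GET_SIZE is set on a DistEntry
 * (via dist_ctrl_set_opt/3 above), dist_ctrl_get_data/1 wraps each result
 * as {Size, Data}, so the distribution controller can learn the total byte
 * size without flattening the returned data. */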
#ifdef DEBUG
#define ERTS_DBG_CHK_NO_DIST_LNK(D, R, L) \
@@ -336,41 +234,45 @@ enum erts_dsig_send_phase {
ERTS_DSIG_SEND_PHASE_MSG_SIZE,
ERTS_DSIG_SEND_PHASE_ALLOC,
ERTS_DSIG_SEND_PHASE_MSG_ENCODE,
- ERTS_DSIG_SEND_PHASE_FIN
+ ERTS_DSIG_SEND_PHASE_FIN,
+ ERTS_DSIG_SEND_PHASE_SEND
};
-struct erts_dsig_send_context {
- enum erts_dsig_send_phase phase;
- Sint reds;
+typedef struct erts_dsig_send_context {
+ int connect;
+ int no_suspend;
+ int no_trap;
Eterm ctl;
Eterm msg;
- int force_busy;
+ Eterm from;
+ Eterm ctl_heap[6];
+ Eterm return_term;
+
+ DistEntry *dep;
+ Eterm node; /* used if dep == NULL */
+ Eterm cid;
+ Eterm connection_id;
+ int deref_dep;
+
+ enum erts_dsig_send_phase phase;
+ Sint reds;
+
Uint32 max_finalize_prepend;
Uint data_size, dhdr_ext_size;
ErtsAtomCacheMap *acmp;
ErtsDistOutputBuf *obuf;
+ Uint fragments;
Uint32 flags;
Process *c_p;
union {
TTBSizeContext sc;
TTBEncodeContext ec;
}u;
-};
-typedef struct {
- int suspend;
- int connect;
-
- Eterm ctl_heap[6];
- ErtsDSigData dsd;
- DistEntry *dep;
- int deref_dep;
- struct erts_dsig_send_context dss;
-
- Eterm return_term;
-}ErtsSendContext;
+} ErtsDSigSendContext;
+typedef struct dist_sequences DistSeqNode;
/*
* erts_dsig_send_* return values.
@@ -378,22 +280,23 @@ typedef struct {
#define ERTS_DSIG_SEND_OK 0
#define ERTS_DSIG_SEND_YIELD 1
#define ERTS_DSIG_SEND_CONTINUE 2
-
-extern int erts_dsig_send_link(ErtsDSigData *, Eterm, Eterm);
-extern int erts_dsig_send_msg(Eterm, Eterm, ErtsSendContext*);
-extern int erts_dsig_send_exit_tt(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
-extern int erts_dsig_send_unlink(ErtsDSigData *, Eterm, Eterm);
-extern int erts_dsig_send_reg_msg(Eterm, Eterm, ErtsSendContext*);
-extern int erts_dsig_send_group_leader(ErtsDSigData *, Eterm, Eterm);
-extern int erts_dsig_send_exit(ErtsDSigData *, Eterm, Eterm, Eterm);
-extern int erts_dsig_send_exit2(ErtsDSigData *, Eterm, Eterm, Eterm);
-extern int erts_dsig_send_demonitor(ErtsDSigData *, Eterm, Eterm, Eterm, int);
-extern int erts_dsig_send_monitor(ErtsDSigData *, Eterm, Eterm, Eterm);
-extern int erts_dsig_send_m_exit(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
-
-extern int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx);
+#define ERTS_DSIG_SEND_TOO_LRG 3
+
+extern int erts_dsig_send_msg(ErtsDSigSendContext*, Eterm, Eterm);
+extern int erts_dsig_send_reg_msg(ErtsDSigSendContext*, Eterm, Eterm);
+extern int erts_dsig_send_link(ErtsDSigSendContext *, Eterm, Eterm);
+extern int erts_dsig_send_exit_tt(ErtsDSigSendContext *, Eterm, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_unlink(ErtsDSigSendContext *, Eterm, Eterm);
+extern int erts_dsig_send_group_leader(ErtsDSigSendContext *, Eterm, Eterm);
+extern int erts_dsig_send_exit(ErtsDSigSendContext *, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_exit2(ErtsDSigSendContext *, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_demonitor(ErtsDSigSendContext *, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_monitor(ErtsDSigSendContext *, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_m_exit(ErtsDSigSendContext *, Eterm, Eterm, Eterm, Eterm);
+
+extern int erts_dsig_send(ErtsDSigSendContext *ctx);
extern int erts_dsend_context_dtor(Binary*);
-extern Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx);
+extern Eterm erts_dsend_export_trap_context(Process* p, ErtsDSigSendContext* ctx);
extern int erts_dist_command(Port *prt, int reds);
extern void erts_dist_port_not_busy(Port *prt);
@@ -403,5 +306,19 @@ extern Uint erts_dist_cache_size(void);
extern Sint erts_abort_connection_rwunlock(DistEntry *dep);
+extern void erts_dist_seq_tree_foreach(
+ DistEntry *dep,
+ int (*func)(ErtsDistExternal *, void*, Sint), void *args);
+
+extern int erts_dsig_prepare(ErtsDSigSendContext *,
+ DistEntry*,
+ Process *,
+ ErtsProcLocks,
+ ErtsDSigPrepLock,
+ int,
+ int,
+ int);
+void erts_dist_print_procs_suspended_on_de(fmtfn_t to, void *to_arg);
+int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks);
#endif
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index 38289ea78a..f07137c883 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -102,6 +102,8 @@ erts_afalc_start(AFAllctr_t *afallctr,
allctr->add_mbc = NULL;
allctr->remove_mbc = NULL;
allctr->largest_fblk_in_mbc = NULL;
+ allctr->first_fblk_in_mbc = NULL;
+ allctr->next_fblk_in_mbc = NULL;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 9e36d5e0d1..b9f0334172 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -64,9 +64,6 @@
# error "Too many schedulers; cannot create that many pref alloc instances"
#endif
-#define ERTS_ALC_FIX_TYPE_IX(T) \
- (ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE)
-
#define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS
#if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND)
@@ -156,20 +153,13 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
ErtsAlcType_t erts_fix_core_allocator_ix;
-enum allctr_type {
- GOODFIT,
- BESTFIT,
- AFIT,
- FIRSTFIT
-};
-
struct au_init {
int enable;
int thr_spec;
int disable_allowed;
int thr_spec_allowed;
int carrier_migration_allowed;
- enum allctr_type atype;
+ ErtsAlcStrat_t astrat;
struct {
AllctrInit_t util;
GFAllctrInit_t gf;
@@ -219,7 +209,9 @@ typedef struct {
struct au_init test_alloc;
} erts_alc_hndl_args_init_t;
-#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}}
+#define ERTS_AU_INIT__ {0, 0, 1, 1, 1, \
+ ERTS_ALC_S_GOODFIT, DEFAULT_ALLCTR_INIT, \
+ {1,1,1,1}}
#define SET_DEFAULT_ALLOC_OPTS(IP) \
do { \
@@ -233,7 +225,7 @@ set_default_sl_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
- ip->atype = GOODFIT;
+ ip->astrat = ERTS_ALC_S_GOODFIT;
ip->init.util.name_prefix = "sl_";
ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
#ifndef SMALL_MEMORY
@@ -252,7 +244,7 @@ set_default_std_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.util.name_prefix = "std_";
ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
#ifndef SMALL_MEMORY
@@ -270,7 +262,7 @@ set_default_ll_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 0;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.bf.ao = 1;
ip->init.util.ramv = 0;
ip->init.util.mmsbc = 0;
@@ -299,7 +291,7 @@ set_default_literal_alloc_opts(struct au_init *ip)
ip->disable_allowed = 0;
ip->thr_spec_allowed = 0;
ip->carrier_migration_allowed = 0;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.bf.ao = 1;
ip->init.util.ramv = 0;
ip->init.util.mmsbc = 0;
@@ -349,7 +341,7 @@ set_default_exec_alloc_opts(struct au_init *ip)
ip->disable_allowed = 0;
ip->thr_spec_allowed = 0;
ip->carrier_migration_allowed = 0;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.bf.ao = 1;
ip->init.util.ramv = 0;
ip->init.util.mmsbc = 0;
@@ -378,7 +370,7 @@ set_default_temp_alloc_opts(struct au_init *ip)
ip->thr_spec = 1;
ip->disable_allowed = 0;
ip->carrier_migration_allowed = 0;
- ip->atype = AFIT;
+ ip->astrat = ERTS_ALC_S_AFIT;
ip->init.util.name_prefix = "temp_";
ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
#ifndef SMALL_MEMORY
@@ -397,7 +389,7 @@ set_default_eheap_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
- ip->atype = GOODFIT;
+ ip->astrat = ERTS_ALC_S_GOODFIT;
ip->init.util.name_prefix = "eheap_";
ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
#ifndef SMALL_MEMORY
@@ -416,7 +408,7 @@ set_default_binary_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.util.name_prefix = "binary_";
ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
#ifndef SMALL_MEMORY
@@ -435,7 +427,7 @@ set_default_ets_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.util.name_prefix = "ets_";
ip->init.util.alloc_no = ERTS_ALC_A_ETS;
#ifndef SMALL_MEMORY
@@ -453,7 +445,7 @@ set_default_driver_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.util.name_prefix = "driver_";
ip->init.util.alloc_no = ERTS_ALC_A_DRIVER;
#ifndef SMALL_MEMORY
@@ -473,7 +465,7 @@ set_default_fix_alloc_opts(struct au_init *ip,
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
- ip->atype = BESTFIT;
+ ip->astrat = ERTS_ALC_S_BESTFIT;
ip->init.bf.ao = 1;
ip->init.util.name_prefix = "fix_";
ip->init.util.fix_type_size = fix_type_sizes;
@@ -493,7 +485,7 @@ set_default_test_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = 0; /* Disabled by default */
ip->thr_spec = -1 * erts_no_schedulers;
- ip->atype = FIRSTFIT;
+ ip->astrat = ERTS_ALC_S_FIRSTFIT;
ip->init.aoff.crr_order = FF_AOFF;
ip->init.aoff.blk_order = FF_BF;
ip->init.util.name_prefix = "test_";
@@ -552,8 +544,8 @@ start_au_allocator(ErtsAlcType_t alctr_n,
static void
refuse_af_strategy(struct au_init *init)
{
- if (init->atype == AFIT)
- init->atype = GOODFIT;
+ if (init->astrat == ERTS_ALC_S_AFIT)
+ init->astrat = ERTS_ALC_S_GOODFIT;
}
#ifdef HARD_DEBUG
@@ -576,7 +568,10 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
for (i=0; i < tspec->size; i++) {
Allctr_t* allctr = tspec->allctr[i];
for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
- allctr->fix[j].type_size += extra_block_size;
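+            /* Editorial note (an inference from the MAX() below): a
+             * fixed-size block must be large enough to hold an
+             * ErtsAllctrDDBlock_t, since freed blocks are reused as
+             * delayed-dealloc queue entries. */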
+ size_t size = allctr->fix[j].type_size;
+ size = MAX(size + extra_block_size,
+ sizeof(ErtsAllctrDDBlock_t));
+ allctr->fix[j].type_size = size;
}
}
}
@@ -584,8 +579,11 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
{
Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
- allctr->fix[j].type_size += extra_block_size;
- }
+ size_t size = allctr->fix[j].type_size;
+ size = MAX(size + extra_block_size,
+ sizeof(ErtsAllctrDDBlock_t));
+ allctr->fix[j].type_size = size;
+ }
}
}
}
@@ -597,7 +595,7 @@ strategy_support_carrier_migration(struct au_init *auip)
* Currently only aoff* and ageff* support carrier
* migration, i.e, type AOFIRSTFIT.
*/
- return auip->atype == FIRSTFIT;
+ return auip->astrat == ERTS_ALC_S_FIRSTFIT;
}
static ERTS_INLINE void
@@ -612,7 +610,7 @@ adjust_carrier_migration_support(struct au_init *auip)
*/
if (!strategy_support_carrier_migration(auip)) {
/* Default to aoffcbf */
- auip->atype = FIRSTFIT;
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
auip->init.aoff.crr_order = FF_AOFF;
auip->init.aoff.blk_order = FF_BF;
}
@@ -1018,7 +1016,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,
int i;
int size = 1;
void *as0;
- enum allctr_type atype;
+ ErtsAlcStrat_t astrat;
ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
@@ -1077,7 +1075,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,
for (i = 0; i < size; i++) {
Allctr_t *as;
- atype = init->atype;
+ astrat = init->astrat;
if (!init->thr_spec)
as0 = state;
@@ -1094,8 +1092,8 @@ start_au_allocator(ErtsAlcType_t alctr_n,
if (i != 0)
init->init.util.ts = 0;
else {
- if (atype == AFIT)
- atype = GOODFIT;
+ if (astrat == ERTS_ALC_S_AFIT)
+ astrat = ERTS_ALC_S_GOODFIT;
init->init.util.ts = 1;
}
init->init.util.tspec = init->thr_spec + 1;
@@ -1109,25 +1107,26 @@ start_au_allocator(ErtsAlcType_t alctr_n,
(((char *) fix_lists) + fix_list_size));
}
+ init->init.util.alloc_strat = astrat;
init->init.util.ix = i;
- switch (atype) {
- case GOODFIT:
+ switch (astrat) {
+ case ERTS_ALC_S_GOODFIT:
as = erts_gfalc_start((GFAllctr_t *) as0,
&init->init.gf,
&init->init.util);
break;
- case BESTFIT:
+ case ERTS_ALC_S_BESTFIT:
as = erts_bfalc_start((BFAllctr_t *) as0,
&init->init.bf,
&init->init.util);
break;
- case AFIT:
+ case ERTS_ALC_S_AFIT:
as = erts_afalc_start((AFAllctr_t *) as0,
&init->init.af,
&init->init.util);
break;
- case FIRSTFIT:
+ case ERTS_ALC_S_FIRSTFIT:
as = erts_aoffalc_start((AOFFAllctr_t *) as0,
&init->init.aoff,
&init->init.util);
@@ -1363,51 +1362,59 @@ handle_au_arg(struct au_init *auip,
else if(has_prefix("as", sub_param)) {
char *alg = get_value(sub_param + 2, argv, ip);
if (sys_strcmp("bf", alg) == 0) {
- auip->atype = BESTFIT;
+ auip->astrat = ERTS_ALC_S_BESTFIT;
auip->init.bf.ao = 0;
}
else if (sys_strcmp("aobf", alg) == 0) {
- auip->atype = BESTFIT;
+ auip->astrat = ERTS_ALC_S_BESTFIT;
auip->init.bf.ao = 1;
}
else if (sys_strcmp("gf", alg) == 0) {
- auip->atype = GOODFIT;
+ auip->astrat = ERTS_ALC_S_GOODFIT;
}
else if (sys_strcmp("af", alg) == 0) {
- auip->atype = AFIT;
+ auip->astrat = ERTS_ALC_S_AFIT;
}
else if (sys_strcmp("aoff", alg) == 0) {
- auip->atype = FIRSTFIT;
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
auip->init.aoff.crr_order = FF_AOFF;
auip->init.aoff.blk_order = FF_AOFF;
}
else if (sys_strcmp("aoffcbf", alg) == 0) {
- auip->atype = FIRSTFIT;
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
auip->init.aoff.crr_order = FF_AOFF;
auip->init.aoff.blk_order = FF_BF;
}
else if (sys_strcmp("aoffcaobf", alg) == 0) {
- auip->atype = FIRSTFIT;
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
auip->init.aoff.crr_order = FF_AOFF;
auip->init.aoff.blk_order = FF_AOBF;
}
else if (sys_strcmp("ageffcaoff", alg) == 0) {
- auip->atype = FIRSTFIT;
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
auip->init.aoff.crr_order = FF_AGEFF;
auip->init.aoff.blk_order = FF_AOFF;
}
else if (sys_strcmp("ageffcbf", alg) == 0) {
- auip->atype = FIRSTFIT;
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
auip->init.aoff.crr_order = FF_AGEFF;
auip->init.aoff.blk_order = FF_BF;
}
else if (sys_strcmp("ageffcaobf", alg) == 0) {
- auip->atype = FIRSTFIT;
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
auip->init.aoff.crr_order = FF_AGEFF;
auip->init.aoff.blk_order = FF_AOBF;
}
else {
- bad_value(param, sub_param + 1, alg);
+ if (auip->init.util.alloc_no == ERTS_ALC_A_TEST
+ && sys_strcmp("chaosff", alg) == 0) {
+ auip->astrat = ERTS_ALC_S_FIRSTFIT;
+ auip->init.aoff.crr_order = FF_CHAOS;
+ auip->init.aoff.blk_order = FF_CHAOS;
+ }
+ else {
+ bad_value(param, sub_param + 1, alg);
+ }
}
if (!strategy_support_carrier_migration(auip))
auip->init.util.acul = 0;
@@ -2030,33 +2037,55 @@ erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
}
static ERTS_INLINE UWord
-alcu_size(ErtsAlcType_t ai, ErtsAlcUFixInfo_t *fi, int fisz)
+alcu_size(ErtsAlcType_t alloc_no, ErtsAlcUFixInfo_t *fi, int fisz)
{
- UWord res = 0;
+ UWord res;
+ int ai;
- ASSERT(erts_allctrs_info[ai].enabled);
- ASSERT(erts_allctrs_info[ai].alloc_util);
+ if (!erts_allctrs_info[alloc_no].thr_spec) {
+ AllctrSize_t size;
+ Allctr_t *allctr;
- if (!erts_allctrs_info[ai].thr_spec) {
- Allctr_t *allctr = erts_allctrs_info[ai].extra;
- AllctrSize_t asize;
- erts_alcu_current_size(allctr, &asize, fi, fisz);
- res += asize.blocks;
+ allctr = erts_allctrs_info[alloc_no].extra;
+ erts_alcu_current_size(allctr, &size, fi, fisz);
+
+ return size.blocks;
}
- else {
- ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai];
- int i;
- ASSERT(tspec->enabled);
+ res = 0;
- for (i = tspec->size - 1; i >= 0; i--) {
- Allctr_t *allctr = tspec->allctr[i];
- AllctrSize_t asize;
- if (allctr) {
- erts_alcu_current_size(allctr, &asize, fi, fisz);
- res += asize.blocks;
- }
- }
+ /* Thread-specific allocators can migrate carriers across types, so we have
+ * to visit every allocator type to gather information on blocks that were
+ * allocated by us. */
+ for (ai = ERTS_ALC_A_MIN; ai < ERTS_ALC_A_MAX; ai++) {
+ ErtsAllocatorThrSpec_t *tspec;
+ Allctr_t *allctr;
+ int i;
+
+ if (!erts_allctrs_info[ai].thr_spec) {
+ continue;
+ }
+
+ tspec = &erts_allctr_thr_spec[ai];
+ ASSERT(tspec->enabled);
+
+ for (i = tspec->size - 1; i >= 0; i--) {
+ allctr = tspec->allctr[i];
+
+ if (allctr) {
+ AllctrSize_t size;
+
+ if (ai == alloc_no) {
+ erts_alcu_current_size(allctr, &size, fi, fisz);
+ } else {
+ erts_alcu_foreign_size(allctr, alloc_no, &size);
+ }
+
+ ASSERT(((SWord)size.blocks) >= 0);
+
+ res += size.blocks;
+ }
+ }
}
return res;
@@ -2400,6 +2429,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
}
if (want_tot_or_sys) {
+ ASSERT(size.total >= size.processes);
size.system = size.total - size.processes;
}
@@ -3459,29 +3489,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
switch (op) {
case 0xf00:
if (((Allctr_t *) a1)->thread_safe)
- return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_TEST,
(void *) a1,
(Uint) a2);
else
- return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_alloc(ERTS_ALC_T_TEST,
(void *) a1,
(Uint) a2);
case 0xf01:
if (((Allctr_t *) a1)->thread_safe)
- return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_TEST,
(void *) a1,
(void *) a2,
(Uint) a3);
else
- return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_realloc(ERTS_ALC_T_TEST,
(void *) a1,
(void *) a2,
(Uint) a3);
case 0xf02:
if (((Allctr_t *) a1)->thread_safe)
- erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
+ erts_alcu_free_ts(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
else
- erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
+ erts_alcu_free(ERTS_ALC_T_TEST, (void *) a1, (void *) a2);
return 0;
case 0xf03: {
Allctr_t *allctr;
@@ -3489,8 +3519,10 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
SET_DEFAULT_ALLOC_OPTS(&init);
init.enable = 1;
- init.atype = GOODFIT;
+ init.astrat = ERTS_ALC_S_GOODFIT;
init.init.util.name_prefix = (char *) a1;
+ init.init.util.alloc_no = ERTS_ALC_A_TEST;
+ init.init.util.alloc_strat = init.astrat;
init.init.util.ts = 1;
if ((char **) a3) {
char **argv = (char **) a3;
@@ -3504,31 +3536,31 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
}
}
- switch (init.atype) {
- case GOODFIT:
+ switch (init.astrat) {
+ case ERTS_ALC_S_GOODFIT:
allctr = erts_gfalc_start((GFAllctr_t *)
- erts_alloc(ERTS_ALC_T_UNDEF,
+ erts_alloc(ERTS_ALC_T_TEST,
sizeof(GFAllctr_t)),
&init.init.gf,
&init.init.util);
break;
- case BESTFIT:
+ case ERTS_ALC_S_BESTFIT:
allctr = erts_bfalc_start((BFAllctr_t *)
- erts_alloc(ERTS_ALC_T_UNDEF,
+ erts_alloc(ERTS_ALC_T_TEST,
sizeof(BFAllctr_t)),
&init.init.bf,
&init.init.util);
break;
- case AFIT:
+ case ERTS_ALC_S_AFIT:
allctr = erts_afalc_start((AFAllctr_t *)
- erts_alloc(ERTS_ALC_T_UNDEF,
+ erts_alloc(ERTS_ALC_T_TEST,
sizeof(AFAllctr_t)),
&init.init.af,
&init.init.util);
break;
- case FIRSTFIT:
+ case ERTS_ALC_S_FIRSTFIT:
allctr = erts_aoffalc_start((AOFFAllctr_t *)
- erts_alloc(ERTS_ALC_T_UNDEF,
+ erts_alloc(ERTS_ALC_T_TEST,
sizeof(AOFFAllctr_t)),
&init.init.aoff,
&init.init.util);
@@ -3544,7 +3576,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
}
case 0xf04:
erts_alcu_stop((Allctr_t *) a1);
- erts_free(ERTS_ALC_T_UNDEF, (void *) a1);
+ erts_free(ERTS_ALC_T_TEST, (void *) a1);
break;
case 0xf05: return (UWord) 1;
case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
@@ -3554,7 +3586,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe;
#endif
case 0xf08: {
- ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_mutex));
+ ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_mutex));
if (ethr_mutex_init(mtx) != 0)
ERTS_ALC_TEST_ABORT;
return (UWord) mtx;
@@ -3563,7 +3595,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
ethr_mutex *mtx = (ethr_mutex *) a1;
if (ethr_mutex_destroy(mtx) != 0)
ERTS_ALC_TEST_ABORT;
- erts_free(ERTS_ALC_T_UNDEF, (void *) mtx);
+ erts_free(ERTS_ALC_T_TEST, (void *) mtx);
break;
}
case 0xf0a:
@@ -3573,7 +3605,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
ethr_mutex_unlock((ethr_mutex *) a1);
break;
case 0xf0c: {
- ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
+ ethr_cond *cnd = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_cond));
if (ethr_cond_init(cnd) != 0)
ERTS_ALC_TEST_ABORT;
return (UWord) cnd;
@@ -3582,7 +3614,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
ethr_cond *cnd = (ethr_cond *) a1;
if (ethr_cond_destroy(cnd) != 0)
ERTS_ALC_TEST_ABORT;
- erts_free(ERTS_ALC_T_UNDEF, (void *) cnd);
+ erts_free(ERTS_ALC_T_TEST, (void *) cnd);
break;
}
case 0xf0e:
@@ -3596,7 +3628,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
break;
}
case 0xf10: {
- ethr_tid *tid = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_tid));
+ ethr_tid *tid = erts_alloc(ERTS_ALC_T_TEST, sizeof(ethr_tid));
if (ethr_thr_create(tid,
(void * (*)(void *)) a1,
(void *) a2,
@@ -3608,7 +3640,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
ethr_tid *tid = (ethr_tid *) a1;
if (ethr_thr_join(*tid, NULL) != 0)
ERTS_ALC_TEST_ABORT;
- erts_free(ERTS_ALC_T_UNDEF, (void *) tid);
+ erts_free(ERTS_ALC_T_TEST, (void *) tid);
break;
}
case 0xf12:
@@ -3872,7 +3904,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
{
Uint sz;
Uint found_type;
- UWord pre_pattern;
+ UWord pre_pattern, expected_pattern;
UWord post_pattern;
UWord *ui_ptr;
#ifdef HARD_DEBUG
@@ -3882,6 +3914,8 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
if (!ptr)
return NULL;
+ expected_pattern = MK_PATTERN(n);
+
ui_ptr = (UWord *) ptr;
pre_pattern = *(--ui_ptr);
*size = sz = *(--ui_ptr);
@@ -3890,7 +3924,13 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
#endif
found_type = GET_TYPE_OF_PATTERN(pre_pattern);
- if (pre_pattern != MK_PATTERN(n)) {
+
+ if (found_type != n) {
+ erts_exit(ERTS_ABORT_EXIT, "ERROR: Mismatching allocator types"
+ " used in alloc and free\n");
+ }
+
+ if (pre_pattern != expected_pattern) {
if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
erts_exit(ERTS_ABORT_EXIT,
"ERROR: Fence at beginning of memory block (p=0x%u) "
@@ -3900,8 +3940,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
sys_memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));
- if (post_pattern != MK_PATTERN(n)
- || pre_pattern != post_pattern) {
+ if (post_pattern != expected_pattern || pre_pattern != post_pattern) {
char fbuf[10];
char obuf[10];
char *ftype;
@@ -3960,9 +3999,10 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
static void *
-debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
+debug_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ ErtsAlcType_t n;
Uint dsize;
void *res;
@@ -3970,9 +4010,11 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
erts_hdbg_chk_blks();
#endif
+ n = ERTS_ALC_T2N(type);
+
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dsize = size + FENCE_SZ;
- res = (*real_af->alloc)(n, real_af->extra, dsize);
+ res = (*real_af->alloc)(type, real_af->extra, dsize);
res = set_memory_fence(res, size, n);
@@ -3986,14 +4028,17 @@ debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
static void *
-debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
+debug_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
{
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ ErtsAlcType_t n;
Uint dsize;
Uint old_size;
void *dptr;
void *res;
+ n = ERTS_ALC_T2N(type);
+
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
dsize = size + FENCE_SZ;
@@ -4003,12 +4048,12 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
erts_hdbg_chk_blks();
#endif
- if (old_size > size)
+ if (ptr && old_size > size)
sys_memset((void *) (((char *) ptr) + size),
0xf,
sizeof(Uint) + old_size - size);
- res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
+ res = (*real_af->realloc)(type, real_af->extra, dptr, dsize);
res = set_memory_fence(res, size, n);
@@ -4021,12 +4066,16 @@ debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
}
static void
-debug_free(ErtsAlcType_t n, void *extra, void *ptr)
+debug_free(ErtsAlcType_t type, void *extra, void *ptr)
{
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ ErtsAlcType_t n;
void *dptr;
Uint size;
- int free_pattern = n;
+ int free_pattern;
+
+ n = ERTS_ALC_T2N(type);
+ free_pattern = n;
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
@@ -4044,7 +4093,7 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
#endif
sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);
- (*real_af->free)(n, real_af->extra, dptr);
+ (*real_af->free)(type, real_af->extra, dptr);
#ifdef PRINT_OPS
fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
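
The debug allocator above brackets every block with a fence: two header words (the size, then a pre-pattern that encodes the type number) sit just below the user pointer, and a copy of the pattern sits just past the user data. check_memory_fence now also compares the type encoded in the pre-pattern against the type passed to free, which is what catches alloc/free type mismatches. A minimal standalone sketch of that layout, using hypothetical MK_PATTERN/PATTERN_TYPE stand-ins (the real macros live in erl_alloc.c):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins: the type number lives in the low bits so a
 * mismatched free can name both the alloc and the free type. */
#define FENCE_MAGIC     ((uintptr_t)0xABCD0000u)
#define MK_PATTERN(n)   (FENCE_MAGIC | (uintptr_t)(n))
#define PATTERN_TYPE(p) ((unsigned)((p) & 0xFFFFu))

/* Layout: [size][pre-pattern][user data ...][post-pattern] */
static void *fenced_alloc(size_t size, unsigned type)
{
    uintptr_t pattern = MK_PATTERN(type);
    uintptr_t *p = malloc(2 * sizeof(uintptr_t) + size + sizeof(uintptr_t));
    if (!p) return NULL;
    p[0] = (uintptr_t)size;
    p[1] = pattern;
    /* The post-pattern may be unaligned, so copy it byte-wise. */
    memcpy((char *)&p[2] + size, &pattern, sizeof(pattern));
    return &p[2];
}

static void fenced_free(void *ptr, unsigned type)
{
    uintptr_t *up = ptr;
    uintptr_t pre, post, size;
    pre = *--up;   /* pre-pattern sits right below the user data */
    size = *--up;  /* ... and the stored block size below that */
    memcpy(&post, (char *)ptr + size, sizeof(post));
    assert(PATTERN_TYPE(pre) == type);              /* alloc/free types match */
    assert(pre == MK_PATTERN(type) && post == pre); /* both fences intact */
    free(up);
}

int main(void)
{
    char *s = fenced_alloc(16, 42);
    strcpy(s, "hello");
    fenced_free(s, 42);
    puts("fences ok");
    return 0;
}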
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index fcb58ff58a..c13cf3f5b0 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -26,10 +26,23 @@
#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
#include "erl_thr_progress.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
-#include "erl_alloc_util.h"
#include "erl_threads.h"
#include "erl_mmap.h"
+typedef enum {
+ ERTS_ALC_S_INVALID = 0,
+
+ ERTS_ALC_S_GOODFIT,
+ ERTS_ALC_S_BESTFIT,
+ ERTS_ALC_S_AFIT,
+ ERTS_ALC_S_FIRSTFIT,
+
+ ERTS_ALC_S_MIN = ERTS_ALC_S_GOODFIT,
+ ERTS_ALC_S_MAX = ERTS_ALC_S_FIRSTFIT
+} ErtsAlcStrat_t;
+
+#include "erl_alloc_util.h"
+
#ifdef DEBUG
# undef ERTS_ALC_WANT_INLINE
# define ERTS_ALC_WANT_INLINE 0
@@ -52,6 +65,14 @@
#define ERTS_ALC_NO_FIXED_SIZES \
(ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1)
+#define ERTS_ALC_IS_FIX_TYPE(T) \
+ (ERTS_ALC_T2N(T) >= ERTS_ALC_N_MIN_A_FIXED_SIZE && \
+ ERTS_ALC_T2N(T) <= ERTS_ALC_N_MAX_A_FIXED_SIZE)
+
+#define ERTS_ALC_FIX_TYPE_IX(T) \
+ (ASSERT(ERTS_ALC_IS_FIX_TYPE(T)), \
+ ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE)
+
void erts_sys_alloc_init(void);
void *erts_sys_alloc(ErtsAlcType_t, void *, Uint);
void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint);
@@ -228,7 +249,7 @@ void *erts_alloc(ErtsAlcType_t type, Uint size)
void *res;
ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
- ERTS_ALC_T2N(type),
+ type,
erts_allctrs[ERTS_ALC_T2A(type)].extra,
size);
if (!res)
@@ -243,7 +264,7 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size)
void *res;
ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
- ERTS_ALC_T2N(type),
+ type,
erts_allctrs[ERTS_ALC_T2A(type)].extra,
ptr,
size);
@@ -258,7 +279,7 @@ void erts_free(ErtsAlcType_t type, void *ptr)
{
ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
(*erts_allctrs[ERTS_ALC_T2A(type)].free)(
- ERTS_ALC_T2N(type),
+ type,
erts_allctrs[ERTS_ALC_T2A(type)].extra,
ptr);
ERTS_MSACC_POP_STATE_X();
@@ -271,7 +292,7 @@ void *erts_alloc_fnf(ErtsAlcType_t type, Uint size)
void *res;
ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
- ERTS_ALC_T2N(type),
+ type,
erts_allctrs[ERTS_ALC_T2A(type)].extra,
size);
ERTS_MSACC_POP_STATE_X();
@@ -285,7 +306,7 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
void *res;
ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
- ERTS_ALC_T2N(type),
+ type,
erts_allctrs[ERTS_ALC_T2A(type)].extra,
ptr,
size);
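
The inline wrappers above now hand the allocator callbacks the full ErtsAlcType_t instead of the pre-converted type number; each callback derives what it needs itself (ERTS_ALC_T2N for fence patterns and fix-size indices, ERTS_ALC_T2A for routing). A toy sketch of such a type encoding, with made-up bit positions purely for illustration (the real mapping is generated from erl_alloc.types):

#include <stddef.h>
#include <stdio.h>

/* Illustration only: here a "type" packs an allocator index and a dense
 * per-type number; the real encoding is table-generated. */
typedef unsigned ErtsAlcType_t;
#define T2A(T) ((T) >> 8)   /* allocator the type belongs to */
#define T2N(T) ((T) & 0xFF) /* per-type number, used e.g. for fences */
#define MAKE_T(A, N) (((A) << 8) | (N))

static void *demo_alloc(ErtsAlcType_t type, size_t size)
{
    /* The callback receives the full type and can recover both halves. */
    printf("alloc: allocator=%u type-number=%u size=%zu\n",
           T2A(type), T2N(type), size);
    return NULL; /* the actual allocation is elided in this sketch */
}

int main(void)
{
    demo_alloc(MAKE_T(3, 17), 128);
    return 0;
}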
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 4f03a34390..92e5069c71 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -30,10 +30,10 @@
# name space).
# * Types, allocators, classes, and descriptions have different name
# spaces.
-# * The type, allocator, and class names INVALID are reserved and can
-# not be used.
+# * The type, allocator, and class names INVALID are reserved and
+# cannot be used.
# * The descriptions invalid_allocator, invalid_class, and invalid_type
-# are reserved and can not be used.
+# are reserved and cannot be used.
# * Declarations can be done conditionally by use of a
# +if <boolean_variable>
#
@@ -275,11 +275,14 @@ type ML_DIST STANDARD SYSTEM monitor_link_dist
type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
type LIST_TRAP SHORT_LIVED PROCESSES list_bif_trap_state
+type CONT_EXIT_TRAP SHORT_LIVED PROCESSES continue_exit_trap_state
+type SEQ_YIELD_STATE SHORT_LIVED SYSTEM dist_seq_yield_state
type ENVIRONMENT SYSTEM SYSTEM environment
type PERSISTENT_TERM LONG_LIVED CODE persisten_term
type PERSISTENT_LOCK_Q SHORT_LIVED SYSTEM persistent_lock_q
+type PERSISTENT_TERM_TMP SHORT_LIVED SYSTEM persistent_term_tmp_table
#
# Types used for special emulators
@@ -345,16 +348,17 @@ type COUNTERS STANDARD SYSTEM erl_bif_counters
# Types used by system specific code
#
-type TEMP_TERM TEMPORARY SYSTEM temp_term
-type DRV_TAB LONG_LIVED SYSTEM drv_tab
-type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
-type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
-type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
-type POLLSET LONG_LIVED SYSTEM pollset
-type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
-type POLL_FDS LONG_LIVED SYSTEM poll_fds
-type FD_STATUS LONG_LIVED SYSTEM fd_status
-type SELECT_FDS LONG_LIVED SYSTEM select_fds
+type TEMP_TERM TEMPORARY SYSTEM temp_term
+type SHORT_LIVED_TERM SHORT_LIVED SYSTEM short_lived_term
+type DRV_TAB LONG_LIVED SYSTEM drv_tab
+type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
+type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
+type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
+type POLLSET LONG_LIVED SYSTEM pollset
+type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
+type POLL_FDS LONG_LIVED SYSTEM poll_fds
+type FD_STATUS LONG_LIVED SYSTEM fd_status
+type SELECT_FDS LONG_LIVED SYSTEM select_fds
+if unix
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 0be4562785..25ac3bc5af 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -42,6 +42,7 @@
#include "global.h"
#include "big.h"
+#include "erl_mmap.h"
#include "erl_mtrace.h"
#define GET_ERL_ALLOC_UTIL_IMPL
#include "erl_alloc_util.h"
@@ -90,15 +91,18 @@ static int initialized = 0;
#define SYS_ALLOC_CARRIER_FLOOR(X) ((X) & SYS_ALLOC_CARRIER_MASK)
#define SYS_ALLOC_CARRIER_CEILING(X) \
SYS_ALLOC_CARRIER_FLOOR((X) + INV_SYS_ALLOC_CARRIER_MASK)
+#define SYS_PAGE_SIZE (sys_page_size)
+#define SYS_PAGE_SZ_MASK ((UWord)(SYS_PAGE_SIZE - 1))
#if 0
/* Can be useful for debugging */
#define MBC_REALLOC_ALWAYS_MOVES
#endif
-
/* alloc_util global parameters */
static Uint sys_alloc_carrier_size;
+static Uint sys_page_size;
+
#if HAVE_ERTS_MSEG
static Uint max_mseg_carriers;
#endif
@@ -113,32 +117,38 @@ static int allow_sys_alloc_carriers;
#define DEC_CC(CC) ((CC)--)
-/* Multi block carrier (MBC) memory layout in R16:
+/* Multi block carrier (MBC) memory layout in OTP 22:
Empty MBC:
-[Carrier_t|pad|Block_t L0T|fhdr| free... ]
+[Carrier_t|pad|Block_t L0T0|fhdr| free... ]
MBC after allocating first block:
-[Carrier_t|pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ]
+[Carrier_t|pad|Block_t 0000| udata |pad|Block_t L0T0|fhdr| free... ]
MBC after allocating second block:
-[Carrier_t|pad|Block_t 000| udata |pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ]
+[Carrier_t|pad|Block_t 0000| udata |pad|Block_t 0000| udata |pad|Block_t L0T0|fhdr| free... ]
MBC after deallocating first block:
-[Carrier_t|pad|Block_t 00T|fhdr| free |FreeBlkFtr_t|Block_t 0P0| udata |pad|Block_t L0T|fhdr| free... ]
+[Carrier_t|pad|Block_t 00T0|fhdr| free |FreeBlkFtr_t|Block_t 0P00| udata |pad|Block_t L0T0|fhdr| free... ]
+MBC after allocating first block, with allocation tagging enabled:
+[Carrier_t|pad|Block_t 000A| udata |atag|pad|Block_t L0T0|fhdr| free... ]
udata = Allocated user data
+ atag = A tag with basic metadata about this allocation
pad = Padding to ensure correct alignment for user data
fhdr = Allocator specific header to keep track of free block
free = Unused free memory
T = This block is free (THIS_FREE_BLK_HDR_FLG)
P = Previous block is free (PREV_FREE_BLK_HDR_FLG)
L = Last block in carrier (LAST_BLK_HDR_FLG)
+ A = Block has an allocation tag footer, only valid for allocated blocks
+ (ATAG_BLK_HDR_FLG)
*/
/* Single block carrier (SBC):
-[Carrier_t|pad|Block_t 111| udata... ]
+[Carrier_t|pad|Block_t 1110| udata... ]
+[Carrier_t|pad|Block_t 111A| udata | atag]
*/
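
The "L0T0"/"111A" strings in the layout comments above are flag bits stored in the low bits of each block header word, next to the size. A toy version of that encoding, with illustrative bit assignments rather than the real THIS_FREE/PREV_FREE/LAST/ATAG values:

#include <assert.h>
#include <stdint.h>

/* Illustrative bit choices; the real flags live in erl_alloc_util.h. */
#define BLK_THIS_FREE ((uintptr_t)1 << 0) /* T: this block is free */
#define BLK_PREV_FREE ((uintptr_t)1 << 1) /* P: previous block is free */
#define BLK_LAST      ((uintptr_t)1 << 2) /* L: last block in carrier */
#define BLK_ATAG      ((uintptr_t)1 << 3) /* A: allocation tag footer present */
#define BLK_FLG_MASK  (BLK_THIS_FREE | BLK_PREV_FREE | BLK_LAST | BLK_ATAG)

/* Block sizes are multiples of the word size, so the low bits are free. */
static uintptr_t mk_hdr(uintptr_t size, uintptr_t flags)
{
    assert((size & BLK_FLG_MASK) == 0 && (flags & ~BLK_FLG_MASK) == 0);
    return size | flags;
}

int main(void)
{
    uintptr_t bhdr = mk_hdr(256, BLK_LAST | BLK_THIS_FREE); /* "L0T0" */
    assert((bhdr & ~BLK_FLG_MASK) == 256);
    assert(bhdr & BLK_LAST);
    return 0;
}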
/* Allocation tags ...
@@ -154,20 +164,20 @@ MBC after deallocating first block:
typedef UWord alcu_atag_t;
-#define MAKE_ATAG(IdAtom, Type) \
- (ASSERT((Type) >= ERTS_ALC_N_MIN && (Type) <= ERTS_ALC_N_MAX), \
+#define MAKE_ATAG(IdAtom, TypeNum) \
+ (ASSERT((TypeNum) >= ERTS_ALC_N_MIN && (TypeNum) <= ERTS_ALC_N_MAX), \
ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \
- (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (Type))
+ (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (TypeNum))
#define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS))
#define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK)
#define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS)
-#define DBG_IS_VALID_ATAG(Allocator, AT) \
+#define DBG_IS_VALID_ATAG(AT) \
(ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \
ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \
- (Allocator)->alloc_no == ERTS_ALC_T2A(ERTS_ALC_N2T(ATAG_TYPE(AT))))
+ ATAG_ID(AT) <= MAX_ATAG_ATOM_ID)
/* Blocks ... */
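
MAKE_ATAG packs an atom id and a type number into a single machine word, and ATAG_ID/ATAG_TYPE recover the halves; the renaming above simply makes explicit that the low field is a type number, not a type. The same pack/unpack pattern in isolation, with an assumed 10-bit field width standing in for ERTS_ALC_N_BITS:

#include <assert.h>
#include <stdint.h>

#define N_BITS 10 /* stand-in for ERTS_ALC_N_BITS */
#define N_MASK ((1u << N_BITS) - 1)

typedef uintptr_t atag_t;

static atag_t make_atag(uintptr_t atom_id, unsigned type_num)
{
    assert(type_num <= N_MASK);
    assert(atom_id <= (UINTPTR_MAX >> N_BITS)); /* id must fit above the type */
    return (atom_id << N_BITS) | type_num;
}

int main(void)
{
    atag_t t = make_atag(12345, 77);
    assert((t >> N_BITS) == 12345); /* ATAG_ID   */
    assert((t & N_MASK) == 77);     /* ATAG_TYPE */
    return 0;
}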
@@ -182,10 +192,15 @@ typedef UWord alcu_atag_t;
#endif
#define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
+#define BLK_HAS_ATAG(B) \
+ (!!((B)->bhdr & ATAG_BLK_HDR_FLG))
+
#define GET_BLK_ATAG(B) \
- (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])
+ (ASSERT(BLK_HAS_ATAG(B)), \
+ ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])
#define SET_BLK_ATAG(B, T) \
- (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))
+ ((B)->bhdr |= ATAG_BLK_HDR_FLG, \
+ ((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))
#define BLK_ATAG_SZ(AP) ((AP)->atags ? sizeof(alcu_atag_t) : 0)
@@ -203,13 +218,13 @@ typedef UWord alcu_atag_t;
(((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ))
#define SET_MBC_ABLK_SZ(B, SZ) \
- (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
(B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ))
#define SET_MBC_FBLK_SZ(B, SZ) \
- (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
(B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ))
#define SET_SBC_BLK_SZ(B, SZ) \
- (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (ASSERT(((SZ) & BLK_FLG_MASK) == 0), \
(B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ))
#define SET_PREV_BLK_FREE(AP,B) \
(ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \
@@ -235,12 +250,12 @@ typedef UWord alcu_atag_t;
# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
(ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \
- ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
+ ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
(B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT)))
# define SET_MBC_FBLK_HDR(B, Sz, F, C) \
(ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \
- ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
(B)->bhdr = ((Sz) | (F)), \
(B)->u.carrier = (C))
@@ -257,8 +272,8 @@ typedef UWord alcu_atag_t;
# define SET_BLK_FREE(B) \
(ASSERT(!IS_PREV_BLK_FREE(B)), \
(B)->u.carrier = ABLK_TO_MBC(B), \
- (B)->bhdr |= THIS_FREE_BLK_HDR_FLG, \
- (B)->bhdr &= (MBC_ABLK_SZ_MASK|FLG_MASK))
+ (B)->bhdr &= (MBC_ABLK_SZ_MASK|LAST_BLK_HDR_FLG), \
+ (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)
# define SET_BLK_ALLOCED(B) \
(ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
@@ -270,15 +285,16 @@ typedef UWord alcu_atag_t;
# define MBC_SZ_MAX_LIMIT ((UWord)~0)
# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
- (ASSERT(((Sz) & FLG_MASK) == 0), \
- ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
- ASSERT((UWord)(F) < SBC_BLK_HDR_FLG), \
+ (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \
+ ASSERT(((F) & ~BLK_FLG_MASK) == 0), \
+ ASSERT(!((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
(B)->bhdr = ((Sz) | (F)), \
(B)->carrier = (C))
# define SET_MBC_FBLK_HDR(B, Sz, F, C) \
- (ASSERT(((Sz) & FLG_MASK) == 0), \
- ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ (ASSERT(((Sz) & BLK_FLG_MASK) == 0), \
+ ASSERT(((F) & ~BLK_FLG_MASK) == 0), \
+ ASSERT(((UWord)(F) & (~BLK_FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
(B)->bhdr = ((Sz) | (F)), \
(B)->carrier = (C))
@@ -297,7 +313,7 @@ typedef UWord alcu_atag_t;
#endif /* !MBC_ABLK_OFFSET_BITS */
#define SET_SBC_BLK_HDR(B, Sz) \
- (ASSERT(((Sz) & FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG)))
+ (ASSERT(((Sz) & BLK_FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG)))
#define BLK_UMEM_SZ(B) \
@@ -320,7 +336,7 @@ typedef UWord alcu_atag_t;
#define GET_PREV_FREE_BLK_HDR_FLG(B) \
((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
#define GET_BLK_HDR_FLGS(B) \
- ((B)->bhdr & FLG_MASK)
+ ((B)->bhdr & BLK_FLG_MASK)
#define NXT_BLK(B) \
(ASSERT(IS_MBC_BLK(B)), \
@@ -419,7 +435,7 @@ do { \
#define SCH_SBC SBC_CARRIER_HDR_FLAG
#define SET_CARRIER_HDR(C, Sz, F, AP) \
- (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \
+ (ASSERT(((Sz) & CRR_FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \
erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
#define BLK_TO_SBC(B) \
@@ -444,8 +460,8 @@ do { \
(!IS_SB_CARRIER((C)))
#define SET_CARRIER_SZ(C, SZ) \
- (ASSERT(((SZ) & FLG_MASK) == 0), \
- ((C)->chdr = ((C)->chdr & FLG_MASK) | (SZ)))
+ (ASSERT(((SZ) & CRR_FLG_MASK) == 0), \
+ ((C)->chdr = ((C)->chdr & CRR_FLG_MASK) | (SZ)))
#define CFLG_SBC (1 << 0)
#define CFLG_MBC (1 << 1)
@@ -575,10 +591,12 @@ do { \
STAT_MSEG_MBC_ALLOC((AP), csz__); \
else \
STAT_SYS_ALLOC_MBC_ALLOC((AP), csz__); \
- (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks; \
+ set_new_allctr_abandon_limit(AP); \
+ (AP)->mbcs.blocks.curr.no += (CRR)->cpool.blocks[(AP)->alloc_no]; \
if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no) \
(AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no; \
- (AP)->mbcs.blocks.curr.size += (CRR)->cpool.blocks_size; \
+ (AP)->mbcs.blocks.curr.size += \
+ (CRR)->cpool.blocks_size[(AP)->alloc_no]; \
if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size) \
(AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size; \
} while (0)
@@ -601,25 +619,33 @@ do { \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
-#define STAT_MBC_ABANDON(AP, CRR) \
-do { \
- UWord csz__ = CARRIER_SZ((CRR)); \
- if (IS_MSEG_CARRIER((CRR))) \
- STAT_MSEG_MBC_FREE((AP), csz__); \
- else \
- STAT_SYS_ALLOC_MBC_FREE((AP), csz__); \
- ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no \
- >= (CRR)->cpool.blocks); \
- (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks; \
- ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size \
- >= (CRR)->cpool.blocks_size); \
- (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \
+#define STAT_MBC_FREE(AP, CRR) \
+do { \
+ UWord csz__ = CARRIER_SZ((CRR)); \
+ if (IS_MSEG_CARRIER((CRR))) { \
+ STAT_MSEG_MBC_FREE((AP), csz__); \
+ } else { \
+ STAT_SYS_ALLOC_MBC_FREE((AP), csz__); \
+ } \
+ set_new_allctr_abandon_limit(AP); \
} while (0)
-#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \
+#define STAT_MBC_ABANDON(AP, CRR) \
+do { \
+ STAT_MBC_FREE(AP, CRR); \
+ ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.no \
+ >= (CRR)->cpool.blocks[(AP)->alloc_no]); \
+ (AP)->mbcs.blocks.curr.no -= (CRR)->cpool.blocks[(AP)->alloc_no]; \
+ ERTS_ALC_CPOOL_ASSERT((AP)->mbcs.blocks.curr.size \
+ >= (CRR)->cpool.blocks_size[(AP)->alloc_no]); \
+ (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size[(AP)->alloc_no]; \
+} while (0)
+
+#define STAT_MBC_BLK_ALLOC_CRR(AP, CRR, BSZ) \
do { \
- (CRR)->cpool.blocks++; \
- (CRR)->cpool.blocks_size += (BSZ); \
+ (CRR)->cpool.blocks[(AP)->alloc_no]++; \
+ (CRR)->cpool.blocks_size[(AP)->alloc_no] += (BSZ); \
+ (CRR)->cpool.total_blocks_size += (BSZ); \
} while (0)
#define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \
@@ -631,50 +657,67 @@ do { \
cstats__->blocks.curr.size += (BSZ); \
if (cstats__->blocks.max.size < cstats__->blocks.curr.size) \
cstats__->blocks.max.size = cstats__->blocks.curr.size; \
- STAT_MBC_BLK_ALLOC_CRR((CRR), (BSZ)); \
+ STAT_MBC_BLK_ALLOC_CRR((AP), (CRR), (BSZ)); \
} while (0)
static ERTS_INLINE int
stat_cpool_mbc_blk_free(Allctr_t *allctr,
+ ErtsAlcType_t type,
Carrier_t *crr,
Carrier_t **busy_pcrr_pp,
UWord blksz)
{
+ Allctr_t *orig_allctr;
+ int alloc_no;
- ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0);
- crr->cpool.blocks--;
- ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size >= blksz);
- crr->cpool.blocks_size -= blksz;
+ alloc_no = ERTS_ALC_T2A(type);
- if (!busy_pcrr_pp || !*busy_pcrr_pp)
- return 0;
+ ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] > 0);
+ crr->cpool.blocks[alloc_no]--;
+ ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] >= blksz);
+ crr->cpool.blocks_size[alloc_no] -= blksz;
+ ERTS_ALC_CPOOL_ASSERT(crr->cpool.total_blocks_size >= blksz);
+ crr->cpool.total_blocks_size -= blksz;
+
+ if (allctr->alloc_no == alloc_no && (!busy_pcrr_pp || !*busy_pcrr_pp)) {
+ /* This is a local block, so we should not update the pool
+ * statistics. */
+ return 0;
+ }
- ERTS_ALC_CPOOL_ASSERT(crr == *busy_pcrr_pp);
+ /* This is either a foreign block that's been fetched from the pool, or any
+ * block that's in the pool. The carrier's owner keeps the statistics for
+ * both pooled and foreign blocks. */
+
+ orig_allctr = crr->cpool.orig_allctr;
+
+ ERTS_ALC_CPOOL_ASSERT(alloc_no != allctr->alloc_no ||
+ (crr == *busy_pcrr_pp && allctr == orig_allctr));
#ifdef ERTS_ALC_CPOOL_DEBUG
ERTS_ALC_CPOOL_ASSERT(
- erts_atomic_dec_read_nob(&allctr->cpool.stat.no_blocks) >= 0);
+ erts_atomic_dec_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0);
ERTS_ALC_CPOOL_ASSERT(
- erts_atomic_add_read_nob(&allctr->cpool.stat.blocks_size,
+ erts_atomic_add_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
-((erts_aint_t) blksz)) >= 0);
#else
- erts_atomic_dec_nob(&allctr->cpool.stat.no_blocks);
- erts_atomic_add_nob(&allctr->cpool.stat.blocks_size,
+ erts_atomic_dec_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]);
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
-((erts_aint_t) blksz));
#endif
return 1;
}
-#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \
-do { \
- if (!stat_cpool_mbc_blk_free((AP), (CRR), (BPCRRPP), (BSZ))) { \
- CarriersStats_t *cstats__ = &(AP)->mbcs; \
- ASSERT(cstats__->blocks.curr.no > 0); \
- cstats__->blocks.curr.no--; \
- ASSERT(cstats__->blocks.curr.size >= (BSZ)); \
- cstats__->blocks.curr.size -= (BSZ); \
- } \
+#define STAT_MBC_BLK_FREE(AP, TYPE, CRR, BPCRRPP, BSZ, FLGS) \
+do { \
+ if (!stat_cpool_mbc_blk_free((AP), (TYPE), (CRR), (BPCRRPP), (BSZ))) { \
+ CarriersStats_t *cstats__ = &(AP)->mbcs; \
+ ASSERT(cstats__->blocks.curr.no > 0); \
+ cstats__->blocks.curr.no--; \
+ ASSERT(cstats__->blocks.curr.size >= (BSZ)); \
+ cstats__->blocks.curr.size -= (BSZ); \
+ } \
} while (0)
/* Debug stuff... */
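
Because a carrier can now host blocks from several allocators, the single blocks/blocks_size counters become per-allocator arrays plus a running total that drives the abandon check. A condensed sketch of that bookkeeping, with simplified types and a hypothetical allocator count:

#include <assert.h>

#define NUM_ALLOCATORS 16 /* stand-in for ERTS_ALC_A_MAX+1 */

typedef struct {
    unsigned long blocks[NUM_ALLOCATORS];      /* per-allocator block counts */
    unsigned long blocks_size[NUM_ALLOCATORS]; /* per-allocator block bytes */
    unsigned long total_blocks_size;           /* drives the abandon check */
} cpool_stats_t;

static void blk_alloc(cpool_stats_t *c, int alloc_no, unsigned long bsz)
{
    c->blocks[alloc_no]++;
    c->blocks_size[alloc_no] += bsz;
    c->total_blocks_size += bsz;
}

static void blk_free(cpool_stats_t *c, int alloc_no, unsigned long bsz)
{
    assert(c->blocks[alloc_no] > 0 && c->blocks_size[alloc_no] >= bsz);
    c->blocks[alloc_no]--;
    c->blocks_size[alloc_no] -= bsz;
    c->total_blocks_size -= bsz;
}

int main(void)
{
    cpool_stats_t c = {{0}};
    blk_alloc(&c, 3, 512);
    blk_free(&c, 3, 512);
    assert(c.total_blocks_size == 0);
    return 0;
}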
@@ -721,8 +764,8 @@ static void make_name_atoms(Allctr_t *allctr);
static Block_t *create_carrier(Allctr_t *, Uint, UWord);
static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
-static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp);
-static void dealloc_block(Allctr_t *, void *, ErtsAlcFixList_t *, int);
+static void mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp);
+static void dealloc_block(Allctr_t *, ErtsAlcType_t, Uint32, void *, ErtsAlcFixList_t *);
static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)
{
@@ -764,14 +807,14 @@ static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)
}
}
- return MAKE_ATAG(id, type);
+ return MAKE_ATAG(id, ERTS_ALC_T2N(type));
}
static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag)
{
Block_t *block;
- ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+ ASSERT(DBG_IS_VALID_ATAG(tag));
ASSERT(allocator->atags && p);
(void)allocator;
@@ -1312,28 +1355,9 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)
#endif
+static ERTS_INLINE Allctr_t *get_pref_allctr(void *extra);
static void *mbc_alloc(Allctr_t *allctr, Uint size);
-typedef struct {
- ErtsAllctrDDBlock_t ddblock__; /* must be first */
- ErtsAlcType_t fix_type;
-} ErtsAllctrFixDDBlock_t;
-
-#define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS)
-
-static ERTS_INLINE void
-dealloc_fix_block(Allctr_t *allctr,
- ErtsAlcType_t type,
- void *ptr,
- ErtsAlcFixList_t *fix,
- int dec_cc_on_redirect)
-{
- /* May be redirected... */
- ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0);
- ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE;
- dealloc_block(allctr, ptr, fix, dec_cc_on_redirect);
-}
-
static ERTS_INLINE void
sched_fix_shrink(Allctr_t *allctr, int on)
{
@@ -1375,7 +1399,7 @@ fix_cpool_check_shrink(Allctr_t *allctr,
if (fix->u.cpool.min_list_size > fix->list_size)
fix->u.cpool.min_list_size = fix->list_size;
- dealloc_fix_block(allctr, type, p, fix, 0);
+ dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, p, fix);
}
}
}
@@ -1386,11 +1410,9 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
void *res;
ErtsAlcFixList_t *fix;
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
- && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-
- fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
- ASSERT(size == fix->type_size);
+ fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
+ ASSERT(type == fix->type && size == fix->type_size);
+ ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));
res = fix->list;
if (res) {
@@ -1419,21 +1441,39 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
static ERTS_INLINE void
fix_cpool_free(Allctr_t *allctr,
ErtsAlcType_t type,
+ Uint32 flags,
void *p,
- Carrier_t **busy_pcrr_pp,
- int unuse)
+ Carrier_t **busy_pcrr_pp)
{
ErtsAlcFixList_t *fix;
+ Allctr_t *fix_allctr;
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
- && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+ /* If this isn't a fix allocator, we need to update the fix list of our
+ * neighboring fix_alloc to keep the statistics consistent. */
+ if (!allctr->fix) {
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
+ fix_allctr = get_pref_allctr(tspec);
+ ASSERT(!fix_allctr->thread_safe);
+ ASSERT(allctr != fix_allctr);
+ }
+ else {
+ fix_allctr = allctr;
+ }
- fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(fix_allctr));
+ ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
- if (unuse)
- fix->u.cpool.used--;
+ fix = &fix_allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
+ ASSERT(type == fix->type);
+
+ if (!(flags & DEALLOC_FLG_FIX_SHRINK)) {
+ fix->u.cpool.used--;
+ }
- if ((!busy_pcrr_pp || !*busy_pcrr_pp)
+ /* We don't want foreign blocks to be long-lived, so we skip recycling if
+ * allctr != fix_allctr. */
+ if (allctr == fix_allctr
+ && (!busy_pcrr_pp || !*busy_pcrr_pp)
&& !fix->u.cpool.shrink_list
&& fix->list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
*((void **) p) = fix->list;
@@ -1446,7 +1486,7 @@ fix_cpool_free(Allctr_t *allctr,
if (IS_SBC_BLK(blk))
destroy_carrier(allctr, blk, NULL);
else
- mbc_free(allctr, p, busy_pcrr_pp);
+ mbc_free(allctr, type, p, busy_pcrr_pp);
fix->u.cpool.allocated--;
fix_cpool_check_shrink(allctr, type, fix, busy_pcrr_pp);
}
@@ -1473,7 +1513,7 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
fix->u.cpool.shrink_list = fix->u.cpool.min_list_size;
fix->u.cpool.min_list_size = fix->list_size;
}
- type = (ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE);
+ type = ERTS_ALC_N2T((ErtsAlcType_t) (ix + ERTS_ALC_N_MIN_A_FIXED_SIZE));
for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
void *ptr;
@@ -1487,7 +1527,7 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
fix->list = *((void **) ptr);
fix->list_size--;
fix->u.cpool.shrink_list--;
- dealloc_fix_block(allctr, type, ptr, fix, 0);
+ dealloc_block(allctr, type, DEALLOC_FLG_FIX_SHRINK, ptr, fix);
}
if (fix->u.cpool.min_list_size > fix->list_size)
fix->u.cpool.min_list_size = fix->list_size;
@@ -1513,11 +1553,9 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
ErtsAlcFixList_t *fix;
void *res;
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
- && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-
- fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
- ASSERT(size == fix->type_size);
+ fix = &allctr->fix[ERTS_ALC_FIX_TYPE_IX(type)];
+ ASSERT(type == fix->type && size == fix->type_size);
+ ASSERT(size >= sizeof(ErtsAllctrDDBlock_t));
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
fix->u.nocpool.used++;
@@ -1534,7 +1572,7 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
if (IS_SBC_BLK(blk))
destroy_carrier(allctr, blk, NULL);
else
- mbc_free(allctr, p, NULL);
+ mbc_free(allctr, type, p, NULL);
fix->u.nocpool.allocated--;
}
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
@@ -1569,10 +1607,8 @@ fix_nocpool_free(Allctr_t *allctr,
Block_t *blk;
ErtsAlcFixList_t *fix;
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= type
- && type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-
- fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ fix = &allctr->fix[ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(fix->type == type);
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
fix->u.nocpool.used--;
@@ -1591,7 +1627,7 @@ fix_nocpool_free(Allctr_t *allctr,
if (IS_SBC_BLK(blk))
destroy_carrier(allctr, blk, NULL);
else
- mbc_free(allctr, p, NULL);
+ mbc_free(allctr, type, p, NULL);
p = fix->list;
fix->list = *((void **) p);
fix->list_size--;
@@ -1602,7 +1638,7 @@ fix_nocpool_free(Allctr_t *allctr,
if (IS_SBC_BLK(blk))
destroy_carrier(allctr, blk, NULL);
else
- mbc_free(allctr, p, NULL);
+ mbc_free(allctr, type, p, NULL);
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
}
@@ -1643,7 +1679,7 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
ptr = fix->list;
fix->list = *((void **) ptr);
fix->list_size--;
- dealloc_block(allctr, ptr, NULL, 0);
+ dealloc_block(allctr, fix->type, 0, ptr, NULL);
fix->u.nocpool.allocated--;
}
if (fix->list_size != 0) {
@@ -1685,6 +1721,7 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)
}
+static UWord allctr_abandon_limit(Allctr_t *allctr);
static void set_new_allctr_abandon_limit(Allctr_t*);
static void abandon_carrier(Allctr_t*, Carrier_t*);
static void poolify_my_carrier(Allctr_t*, Carrier_t*);
@@ -1806,7 +1843,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
static void
init_dd_queue(ErtsAllctrDDQueue_t *ddq)
{
- erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL);
erts_atomic_init_nob(&ddq->tail.data.last,
(erts_aint_t) &ddq->tail.data.marker);
erts_atomic_init_nob(&ddq->tail.data.um_refc[0], 0);
@@ -1827,17 +1864,17 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
erts_aint_t itmp;
ErtsAllctrDDBlock_t *enq, *this = ptr;
- erts_atomic_init_nob(&this->atmc_next, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&this->u.atmc_next, ERTS_AINT_NULL);
/* Enqueue at end of list... */
enq = (ErtsAllctrDDBlock_t *) erts_atomic_read_nob(&ddq->tail.data.last);
- itmp = erts_atomic_cmpxchg_relb(&enq->atmc_next,
+ itmp = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,
(erts_aint_t) this,
ERTS_AINT_NULL);
if (itmp == ERTS_AINT_NULL) {
/* We are required to move last pointer */
#ifdef DEBUG
- ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->atmc_next));
+ ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->u.atmc_next));
ASSERT(((erts_aint_t) enq)
== erts_atomic_xchg_relb(&ddq->tail.data.last,
(erts_aint_t) this));
@@ -1855,8 +1892,8 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
while (1) {
erts_aint_t itmp2;
- erts_atomic_set_nob(&this->atmc_next, itmp);
- itmp2 = erts_atomic_cmpxchg_relb(&enq->atmc_next,
+ erts_atomic_set_nob(&this->u.atmc_next, itmp);
+ itmp2 = erts_atomic_cmpxchg_relb(&enq->u.atmc_next,
(erts_aint_t) this,
itmp);
if (itmp == itmp2)
@@ -1865,7 +1902,7 @@ ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr, int cinit)
itmp = itmp2;
else {
enq = (ErtsAllctrDDBlock_t *) itmp2;
- itmp = erts_atomic_read_acqb(&enq->atmc_next);
+ itmp = erts_atomic_read_acqb(&enq->u.atmc_next);
ASSERT(itmp != ERTS_AINT_NULL);
}
i++;
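
The enqueue above is a lock-free multi-producer append: CAS the presumed tail's next pointer from NULL to the new element, and on contention chase next pointers until the append succeeds. A reduced sketch of the general technique in C11 atomics, not the exact ERTS variant (which also manages a marker element and unreferenced-segment counters):

#include <stdatomic.h>
#include <stddef.h>

typedef struct node { _Atomic(struct node *) next; } node_t;

typedef struct {
    node_t head;            /* sentinel; the consumer starts here */
    _Atomic(node_t *) last; /* hint to the current tail */
} mpsc_queue_t;

static void enqueue(mpsc_queue_t *q, node_t *n)
{
    node_t *tail = atomic_load_explicit(&q->last, memory_order_relaxed);
    atomic_store_explicit(&n->next, NULL, memory_order_relaxed);

    for (;;) {
        node_t *expected = NULL;
        /* Try to hang n off what we believe is the last element. */
        if (atomic_compare_exchange_strong_explicit(&tail->next, &expected, n,
                                                    memory_order_acq_rel,
                                                    memory_order_acquire)) {
            /* Update the tail hint; it may briefly lag behind the true
             * tail, which is harmless since producers chase next pointers. */
            atomic_store_explicit(&q->last, n, memory_order_release);
            return;
        }
        /* Someone else appended first; continue from their element. */
        tail = expected;
    }
}

int main(void)
{
    mpsc_queue_t q;
    node_t a, b;
    atomic_init(&q.head.next, NULL);
    atomic_init(&q.last, &q.head);
    enqueue(&q, &a);
    enqueue(&q, &b);
    return 0;
}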
@@ -1881,8 +1918,8 @@ check_insert_marker(ErtsAllctrDDQueue_t *ddq, erts_aint_t ilast)
erts_aint_t itmp;
ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast;
- erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL);
- itmp = erts_atomic_cmpxchg_relb(&last->atmc_next,
+ erts_atomic_init_nob(&ddq->tail.data.marker.u.atmc_next, ERTS_AINT_NULL);
+ itmp = erts_atomic_cmpxchg_relb(&last->u.atmc_next,
(erts_aint_t) &ddq->tail.data.marker,
ERTS_AINT_NULL);
if (itmp == ERTS_AINT_NULL) {
@@ -1933,7 +1970,7 @@ ddq_dequeue(ErtsAllctrDDQueue_t *ddq)
ASSERT(ddq->head.used_marker);
ddq->head.used_marker = 0;
blk = ((ErtsAllctrDDBlock_t *)
- erts_atomic_read_nob(&blk->atmc_next));
+ erts_atomic_read_nob(&blk->u.atmc_next));
if (blk == ddq->head.unref_end) {
ddq->head.first = blk;
return NULL;
@@ -1941,7 +1978,7 @@ ddq_dequeue(ErtsAllctrDDQueue_t *ddq)
}
ddq->head.first = ((ErtsAllctrDDBlock_t *)
- erts_atomic_read_nob(&blk->atmc_next));
+ erts_atomic_read_nob(&blk->u.atmc_next));
ASSERT(ddq->head.first);
@@ -2003,19 +2040,13 @@ check_pending_dealloc_carrier(Allctr_t *allctr,
int *need_more_work);
static void
-handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
+handle_delayed_fix_dealloc(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags,
+ void *ptr)
{
- ErtsAlcType_t type;
-
- type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
-
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE
- <= (type & ~ERTS_ALC_FIX_NO_UNUSE));
- ASSERT((type & ~ERTS_ALC_FIX_NO_UNUSE)
- <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+ ASSERT(ERTS_ALC_IS_FIX_TYPE(type));
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
- fix_nocpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE), ptr);
+ fix_nocpool_free(allctr, type, ptr);
else {
Block_t *blk = UMEM2BLK(ptr);
Carrier_t *busy_pcrr_p;
@@ -2030,20 +2061,24 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
NULL, &busy_pcrr_p);
if (used_allctr == allctr) {
doit:
- fix_cpool_free(allctr, (type & ~ERTS_ALC_FIX_NO_UNUSE),
- ptr, &busy_pcrr_p,
- !(type & ERTS_ALC_FIX_NO_UNUSE));
+ fix_cpool_free(allctr, type, flags, ptr, &busy_pcrr_p);
clear_busy_pool_carrier(allctr, busy_pcrr_p);
}
else {
/* Carrier migrated; need to redirect block to new owner... */
- int cinit = used_allctr->dd.ix - allctr->dd.ix;
+ ErtsAllctrDDBlock_t *dd_block;
+ int cinit;
+
+ dd_block = (ErtsAllctrDDBlock_t*)ptr;
+ dd_block->flags = flags;
+ dd_block->type = type;
ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
DEC_CC(allctr->calls.this_free);
- ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type;
+ cinit = used_allctr->dd.ix - allctr->dd.ix;
+
if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
erts_alloc_notify_delayed_dealloc(used_allctr->ix);
}
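
Since the payload of a freed block is dead memory, the redirect path stores the deallocation type and flags in the block itself before enqueueing it on the owning allocator's delayed-dealloc queue; the previous ERTS_ALC_FIX_NO_UNUSE bit trick becomes an explicit flags field. In miniature, with a simplified struct (the real ErtsAllctrDDBlock_t also carries the atomic queue link in a union):

#include <stdio.h>

/* Simplified: only the metadata fields the redirect path touches. */
typedef struct {
    unsigned type;  /* ErtsAlcType_t of the original allocation */
    unsigned flags; /* e.g. DEALLOC_FLG_FIX_SHRINK / _REDIRECTED */
} dd_block_t;

static void redirect_free(void *ptr, unsigned type, unsigned flags)
{
    dd_block_t *dd = ptr; /* reuse the dead block's own memory */
    dd->type = type;
    dd->flags = flags;
    /* ... ddq_enqueue(owner_queue, ptr, cinit) would follow here ... */
    printf("redirected type=%u flags=%u\n", dd->type, dd->flags);
}

int main(void)
{
    _Alignas(dd_block_t) unsigned char dead_block[64]; /* a freed user block */
    redirect_free(dead_block, 17, 0);
    return 0;
}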
@@ -2067,7 +2102,6 @@ handle_delayed_dealloc(Allctr_t *allctr,
int need_mr_wrk = 0;
int have_checked_incoming = 0;
int ops = 0;
- ErtsAlcFixList_t *fix;
int res;
ErtsAllctrDDQueue_t *ddq;
@@ -2076,8 +2110,6 @@ handle_delayed_dealloc(Allctr_t *allctr,
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- fix = allctr->fix;
-
ddq = &allctr->dd.q;
res = 0;
@@ -2166,16 +2198,27 @@ handle_delayed_dealloc(Allctr_t *allctr,
}
}
else {
+ ErtsAllctrDDBlock_t *dd_block;
+ ErtsAlcType_t type;
+ Uint32 flags;
+
+ dd_block = (ErtsAllctrDDBlock_t*)ptr;
+ flags = dd_block->flags;
+ type = dd_block->type;
+
+ flags |= DEALLOC_FLG_REDIRECTED;
+
ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) !=
ErtsContainerStruct(blk, Carrier_t,
cpool.homecoming_dd.blk)));
INC_CC(allctr->calls.this_free);
- if (fix)
- handle_delayed_fix_dealloc(allctr, ptr);
- else
- dealloc_block(allctr, ptr, NULL, 1);
+ if (ERTS_ALC_IS_FIX_TYPE(type)) {
+ handle_delayed_fix_dealloc(allctr, type, flags, ptr);
+ } else {
+ dealloc_block(allctr, type, flags, ptr, NULL);
+ }
}
}
@@ -2203,8 +2246,10 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type,
void *ptr,
int cinit)
{
- if (allctr->fix)
- ((ErtsAllctrFixDDBlock_t*) ptr)->fix_type = type;
+ ErtsAllctrDDBlock_t *dd_block = ((ErtsAllctrDDBlock_t*)ptr);
+
+ dd_block->type = type;
+ dd_block->flags = 0;
if (ddq_enqueue(&allctr->dd.q, ptr, cinit))
erts_alloc_notify_delayed_dealloc(allctr->ix);
@@ -2234,10 +2279,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
return;
- allctr->cpool.check_limit_count--;
- if (--allctr->cpool.check_limit_count <= 0)
- set_new_allctr_abandon_limit(allctr);
-
+ ASSERT(allctr->cpool.abandon_limit == allctr_abandon_limit(allctr));
ASSERT(erts_thr_progress_is_managed_thread());
if (allctr->cpool.disable_abandon)
@@ -2255,7 +2297,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
if (allctr->main_carrier == crr)
return;
- if (crr->cpool.blocks_size > crr->cpool.abandon_limit)
+ if (crr->cpool.total_blocks_size > crr->cpool.abandon_limit)
return;
if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID
@@ -2291,24 +2333,26 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL)
static void
-dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_redirect)
+dealloc_block(Allctr_t *allctr, ErtsAlcType_t type, Uint32 flags, void *ptr,
+ ErtsAlcFixList_t *fix)
{
Block_t *blk = UMEM2BLK(ptr);
+ ASSERT(!fix || type == fix->type);
+
ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
if (IS_SBC_BLK(blk)) {
destroy_carrier(allctr, blk, NULL);
if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
- ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
- if (!(type & ERTS_ALC_FIX_NO_UNUSE))
+ if (!(flags & DEALLOC_FLG_FIX_SHRINK))
fix->u.cpool.used--;
fix->u.cpool.allocated--;
}
}
else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
- mbc_free(allctr, ptr, NULL);
+ mbc_free(allctr, type, ptr, NULL);
else {
Carrier_t *busy_pcrr_p;
Allctr_t *used_allctr;
@@ -2317,22 +2361,29 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_
NULL, &busy_pcrr_p);
if (used_allctr == allctr) {
if (fix) {
- ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
- if (!(type & ERTS_ALC_FIX_NO_UNUSE))
+ if (!(flags & DEALLOC_FLG_FIX_SHRINK))
fix->u.cpool.used--;
fix->u.cpool.allocated--;
}
- mbc_free(allctr, ptr, &busy_pcrr_p);
+ mbc_free(allctr, type, ptr, &busy_pcrr_p);
clear_busy_pool_carrier(allctr, busy_pcrr_p);
}
else {
/* Carrier migrated; need to redirect block to new owner... */
- int cinit = used_allctr->dd.ix - allctr->dd.ix;
+ ErtsAllctrDDBlock_t *dd_block;
+ int cinit;
+
+ dd_block = (ErtsAllctrDDBlock_t*)ptr;
+ dd_block->flags = flags;
+ dd_block->type = type;
ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
- if (dec_cc_on_redirect)
+ if (flags & DEALLOC_FLG_REDIRECTED)
DEC_CC(allctr->calls.this_free);
+
+ cinit = used_allctr->dd.ix - allctr->dd.ix;
+
if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
erts_alloc_notify_delayed_dealloc(used_allctr->ix);
}
@@ -2498,9 +2549,155 @@ mbc_alloc(Allctr_t *allctr, Uint size)
return BLK2UMEM(blk);
}
+typedef struct {
+ char *ptr;
+ UWord size;
+} ErtsMemDiscardRegion;
+
+/* Construct a discard region for the user memory of a free block, letting the
+ * OS reclaim its physical memory when required.
+ *
+ * Note that we're ignoring both the footer and everything that comes before
+ * the minimum block size as the allocator uses those areas to manage the
+ * block. */
+static void ERTS_INLINE
+mem_discard_start(Allctr_t *allocator, Block_t *block,
+ ErtsMemDiscardRegion *out)
+{
+ UWord size = BLK_SZ(block);
+
+ ASSERT(size >= allocator->min_block_size);
+
+ if (size > (allocator->min_block_size + FBLK_FTR_SZ)) {
+ out->size = size - allocator->min_block_size - FBLK_FTR_SZ;
+ } else {
+ out->size = 0;
+ }
+
+ out->ptr = (char*)block + allocator->min_block_size;
+}
+
+/* Expands a discard region into a neighboring free block, allowing us to
+ * discard the block header and first page.
+ *
+ * This is very important in small-allocation scenarios where no single block
+ * is large enough to be discarded on its own. */
+static void ERTS_INLINE
+mem_discard_coalesce(Allctr_t *allocator, Block_t *neighbor,
+ ErtsMemDiscardRegion *region)
+{
+ char *neighbor_start;
+
+ ASSERT(IS_FREE_BLK(neighbor));
+
+ neighbor_start = (char*)neighbor;
+
+ if (region->ptr >= neighbor_start) {
+ char *region_start_page;
+
+ region_start_page = region->ptr - SYS_PAGE_SIZE;
+ region_start_page = (char*)((UWord)region_start_page & ~SYS_PAGE_SZ_MASK);
+
+ /* Expand if our first page begins within the previous free block's
+ * unused data. */
+ if (region_start_page >= (neighbor_start + allocator->min_block_size)) {
+ region->size += (region->ptr - region_start_page) - FBLK_FTR_SZ;
+ region->ptr = region_start_page;
+ }
+ } else {
+ char *region_end_page;
+ UWord neighbor_size;
+
+ ASSERT(region->ptr <= neighbor_start);
+
+ region_end_page = region->ptr + region->size + SYS_PAGE_SIZE;
+ region_end_page = (char*)((UWord)region_end_page & ~SYS_PAGE_SZ_MASK);
+
+ neighbor_size = BLK_SZ(neighbor) - FBLK_FTR_SZ;
+
+ /* Expand if our last page ends anywhere within the next free block,
+ * sans the footer we'll inherit. */
+ if (region_end_page < neighbor_start + neighbor_size) {
+ region->size += region_end_page - (region->ptr + region->size);
+ }
+ }
+}
+
+static void ERTS_INLINE
+mem_discard_finish(Allctr_t *allocator, Block_t *block,
+ ErtsMemDiscardRegion *region)
+{
+#ifdef DEBUG
+ char *block_start, *block_end;
+ UWord block_size;
+
+ block_size = BLK_SZ(block);
+
+ /* Ensure that the region is completely covered by the legal area of the
+ * free block. This must hold even when the region is too small to be
+ * discarded. */
+ if (region->size > 0) {
+ ASSERT(block_size > allocator->min_block_size + FBLK_FTR_SZ);
+
+ block_start = (char*)block + allocator->min_block_size;
+ block_end = (char*)block + block_size - FBLK_FTR_SZ;
+
+ ASSERT(region->size == 0 ||
+ (region->ptr + region->size <= block_end &&
+ region->ptr >= block_start &&
+ region->size <= block_size));
+ }
+#else
+ (void)allocator;
+ (void)block;
+#endif
+
+ if (region->size > SYS_PAGE_SIZE) {
+ UWord align_offset, size;
+ char *ptr;
+
+ align_offset = SYS_PAGE_SIZE - ((UWord)region->ptr & SYS_PAGE_SZ_MASK);
+
+ size = (region->size - align_offset) & ~SYS_PAGE_SZ_MASK;
+ ptr = region->ptr + align_offset;
+
+ if (size > 0) {
+ ASSERT(!((UWord)ptr & SYS_PAGE_SZ_MASK));
+ ASSERT(!(size & SYS_PAGE_SZ_MASK));
+
+ erts_mem_discard(ptr, size);
+ }
+ }
+}
+
+static void
+carrier_mem_discard_free_blocks(Allctr_t *allocator, Carrier_t *carrier)
+{
+ static const int MAX_BLOCKS_TO_DISCARD = 100;
+ Block_t *block;
+ int i;
+
+ block = allocator->first_fblk_in_mbc(allocator, carrier);
+ i = 0;
+
+ while (block != NULL && i < MAX_BLOCKS_TO_DISCARD) {
+ ErtsMemDiscardRegion region;
+
+ ASSERT(IS_FREE_BLK(block));
+
+ mem_discard_start(allocator, block, &region);
+ mem_discard_finish(allocator, block, &region);
+
+ block = allocator->next_fblk_in_mbc(allocator, carrier, block);
+ i++;
+ }
+}
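
mem_discard_finish only hands the OS whole pages: the region start is rounded up and the end rounded down to page boundaries, and nothing happens if no whole page survives. The rounding idea in isolation (the real code computes the offsets slightly differently; 4096 is an assumed page size, ERTS queries the real one at startup):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK ((uintptr_t)(PAGE_SIZE - 1))

/* Largest page-aligned subrange of [ptr, ptr+len); returns its length and
 * stores the aligned start through *start. */
static uintptr_t whole_pages(uintptr_t ptr, uintptr_t len, uintptr_t *start)
{
    uintptr_t first = (ptr + PAGE_MASK) & ~PAGE_MASK; /* round start up */
    uintptr_t last = (ptr + len) & ~PAGE_MASK;        /* round end down */
    *start = first;
    return last > first ? last - first : 0;
}

int main(void)
{
    uintptr_t start, n;
    n = whole_pages(0x1010, 3 * PAGE_SIZE, &start);
    assert(start == 0x2000 && n == 2 * PAGE_SIZE);
    n = whole_pages(0x1010, 100, &start);
    assert(n == 0); /* too small to cover any whole page */
    return 0;
}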
+
static void
-mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
+mbc_free(Allctr_t *allctr, ErtsAlcType_t type, void *p, Carrier_t **busy_pcrr_pp)
{
+ ErtsMemDiscardRegion discard_region = {0};
+ int discard;
Uint is_first_blk;
Uint is_last_blk;
Uint blk_sz;
@@ -2516,12 +2713,28 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
ASSERT(IS_MBC_BLK(blk));
ASSERT(blk_sz >= allctr->min_block_size);
+#ifndef DEBUG
+ /* We want to mark freed blocks as reclaimable to the OS, but it's a fairly
+ * expensive operation that does little good if the block is reused soon
+ * afterwards, so we limit it to deallocations on pooled carriers. */
+ discard = busy_pcrr_pp && *busy_pcrr_pp;
+#else
+ /* Always discard in debug mode, regardless of whether we're in the pool or
+ * not. */
+ discard = 1;
+#endif
+
+ if (discard) {
+ mem_discard_start(allctr, blk, &discard_region);
+ }
+
HARD_CHECK_BLK_CARRIER(allctr, blk);
crr = ABLK_TO_MBC(blk);
ERTS_ALC_CPOOL_FREE_OP(allctr);
- STAT_MBC_BLK_FREE(allctr, crr, busy_pcrr_pp, blk_sz, alcu_flgs);
+
+ STAT_MBC_BLK_FREE(allctr, type, crr, busy_pcrr_pp, blk_sz, alcu_flgs);
is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk);
is_last_blk = IS_LAST_BLK(blk);
@@ -2532,6 +2745,10 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
blk = PREV_BLK(blk);
(*allctr->unlink_free_block)(allctr, blk);
+ if (discard) {
+ mem_discard_coalesce(allctr, blk, &discard_region);
+ }
+
blk_sz += MBC_FBLK_SZ(blk);
is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk);
SET_MBC_FBLK_SZ(blk, blk_sz);
@@ -2547,6 +2764,11 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
if (IS_FREE_BLK(nxt_blk)) {
/* Coalesce with next block... */
(*allctr->unlink_free_block)(allctr, nxt_blk);
+
+ if (discard) {
+ mem_discard_coalesce(allctr, nxt_blk, &discard_region);
+ }
+
blk_sz += MBC_FBLK_SZ(nxt_blk);
SET_MBC_FBLK_SZ(blk, blk_sz);
@@ -2582,16 +2804,22 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
else {
(*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- if (busy_pcrr_pp && *busy_pcrr_pp)
+
+ if (discard) {
+ mem_discard_finish(allctr, blk, &discard_region);
+ }
+
+ if (busy_pcrr_pp && *busy_pcrr_pp) {
update_pooled_tree(allctr, crr, blk_sz);
- else
+ } else {
check_abandon_carrier(allctr, blk, busy_pcrr_pp);
+ }
}
}
static void *
-mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
- Carrier_t **busy_pcrr_pp)
+mbc_realloc(Allctr_t *allctr, ErtsAlcType_t type, void *p, Uint size,
+ Uint32 alcu_flgs, Carrier_t **busy_pcrr_pp)
{
void *new_p;
Uint old_blk_sz;
@@ -2629,7 +2857,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
new_blk = UMEM2BLK(new_p);
ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp));
sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
- mbc_free(allctr, p, busy_pcrr_pp);
+ mbc_free(allctr, type, p, busy_pcrr_pp);
return new_p;
}
@@ -2706,7 +2934,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
crr = ABLK_TO_MBC(blk);
ERTS_ALC_CPOOL_REALLOC_OP(allctr);
- STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs);
+ STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size);
@@ -2810,7 +3038,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
}
ERTS_ALC_CPOOL_REALLOC_OP(allctr);
- STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs);
+ STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
STAT_MBC_BLK_ALLOC(allctr, crr, blk_sz, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
@@ -2871,7 +3099,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
if (!new_p)
return NULL;
sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
- mbc_free(allctr, p, busy_pcrr_pp);
+ mbc_free(allctr, type, p, busy_pcrr_pp);
return new_p;
@@ -2901,7 +3129,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
1);
new_p = BLK2UMEM(new_blk);
sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
- mbc_free(allctr, p, NULL);
+ mbc_free(allctr, type, p, NULL);
return new_p;
}
else {
@@ -2958,7 +3186,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
0);
ERTS_ALC_CPOOL_FREE_OP(allctr);
- STAT_MBC_BLK_FREE(allctr, crr, NULL, old_blk_sz, alcu_flgs);
+ STAT_MBC_BLK_FREE(allctr, type, crr, NULL, old_blk_sz, alcu_flgs);
return new_p;
}
@@ -2969,7 +3197,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 100
-#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100
#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
#define ERTS_ALC_CPOOL_PTR_MOD_MRK (((erts_aint_t) 1) << 0)
@@ -2996,14 +3223,11 @@ typedef union {
# error "Carrier pool implementation assumes ERTS_ALC_A_MIN > ERTS_ALC_A_INVALID"
#endif
-/*
- * The pool is only allowed to be manipulated by managed
- * threads except in the alloc_SUITE:cpool case. In this
- * test case carrier_pool[ERTS_ALC_A_INVALID] will be
- * used.
- */
+/* The pools are only allowed to be manipulated by managed threads except in
+ * the alloc_SUITE:cpool test, where only test_carrier_pool is used. */
-static ErtsAlcCrrPool_t carrier_pool[ERTS_ALC_A_MAX+1] erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+static ErtsAlcCrrPool_t firstfit_carrier_pool;
+static ErtsAlcCrrPool_t test_carrier_pool;
#define ERTS_ALC_CPOOL_MAX_BACKOFF (1 << 8)
@@ -3024,12 +3248,12 @@ backoff(int n)
static int
cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr)
{
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
ErtsAlcCPoolData_t *cpdp = sentinel;
Carrier_t *tmp_crr;
while (1) {
- cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~FLG_MASK);
+ cpdp = (ErtsAlcCPoolData_t *) (erts_atomic_read_ddrb(&cpdp->next) & ~CRR_FLG_MASK);
if (cpdp == sentinel)
return 0;
tmp_crr = (Carrier_t *) (((char *) cpdp) - offsetof(Carrier_t, cpool));
@@ -3041,7 +3265,7 @@ cpool_dbg_is_in_pool(Allctr_t *allctr, Carrier_t *crr)
static int
cpool_is_empty(Allctr_t *allctr)
{
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
return ((erts_atomic_read_rb(&sentinel->next) == (erts_aint_t) sentinel)
&& (erts_atomic_read_rb(&sentinel->prev) == (erts_aint_t) sentinel));
}
@@ -3131,16 +3355,31 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
{
ErtsAlcCPoolData_t *cpd1p, *cpd2p;
erts_aint_t val;
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
Allctr_t *orig_allctr = crr->cpool.orig_allctr;
- ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
+ ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
|| erts_thr_progress_is_managed_thread());
- erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size,
- (erts_aint_t) crr->cpool.blocks_size);
- erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks,
- (erts_aint_t) crr->cpool.blocks);
+ {
+ int alloc_no = allctr->alloc_no;
+
+ ERTS_ALC_CPOOL_ASSERT(
+ erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]) >= 0 &&
+ crr->cpool.blocks_size[alloc_no] >= 0);
+
+ ERTS_ALC_CPOOL_ASSERT(
+ erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]) >= 0 &&
+ crr->cpool.blocks[alloc_no] >= 0);
+
+ /* We only modify the counter for our current type since the others are
+ * conceptually still in the pool. */
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
+ ((erts_aint_t) crr->cpool.blocks_size[alloc_no]));
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no],
+ ((erts_aint_t) crr->cpool.blocks[alloc_no]));
+ }
+
erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
(erts_aint_t) CARRIER_SZ(crr));
erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers);
@@ -3213,10 +3452,10 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
ErtsAlcCPoolData_t *cpd1p, *cpd2p;
erts_aint_t val;
#ifdef ERTS_ALC_CPOOL_DEBUG
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ ErtsAlcCPoolData_t *sentinel = allctr->cpool.sentinel;
#endif
- ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
+ ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
|| erts_thr_progress_is_managed_thread());
ERTS_ALC_CPOOL_ASSERT(sentinel != &crr->cpool);
@@ -3292,28 +3531,43 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
crr->cpool.thr_prgr = erts_thr_progress_later(NULL);
- erts_atomic_add_nob(&prev_allctr->cpool.stat.blocks_size,
- -((erts_aint_t) crr->cpool.blocks_size));
- erts_atomic_add_nob(&prev_allctr->cpool.stat.no_blocks,
- -((erts_aint_t) crr->cpool.blocks));
- erts_atomic_add_nob(&prev_allctr->cpool.stat.carriers_size,
+ {
+ Allctr_t *orig_allctr = crr->cpool.orig_allctr;
+ int alloc_no = allctr->alloc_no;
+
+ ERTS_ALC_CPOOL_ASSERT(orig_allctr == prev_allctr);
+
+ ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks_size[alloc_no] <=
+ erts_atomic_read_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no]));
+
+ ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks[alloc_no] <=
+ erts_atomic_read_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no]));
+
+ /* We only modify the counters for our current type since the others
+ * were, conceptually, never taken out of the pool. */
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size[alloc_no],
+ -((erts_aint_t) crr->cpool.blocks_size[alloc_no]));
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks[alloc_no],
+ -((erts_aint_t) crr->cpool.blocks[alloc_no]));
+
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
-((erts_aint_t) CARRIER_SZ(crr)));
- erts_atomic_dec_wb(&prev_allctr->cpool.stat.no_carriers);
+ erts_atomic_dec_wb(&orig_allctr->cpool.stat.no_carriers);
+ }
}
static Carrier_t *
cpool_fetch(Allctr_t *allctr, UWord size)
{
- enum { IGNORANT, HAS_SEEN_SENTINEL, THE_LAST_ONE } loop_state;
- int i;
+ int i, seen_sentinel;
Carrier_t *crr;
Carrier_t *reinsert_crr = NULL;
ErtsAlcCPoolData_t *cpdp;
ErtsAlcCPoolData_t *cpool_entrance = NULL;
ErtsAlcCPoolData_t *sentinel;
- ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
+ ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_TEST /* testcase */
|| erts_thr_progress_is_managed_thread());
i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
@@ -3415,48 +3669,39 @@ cpool_fetch(Allctr_t *allctr, UWord size)
/*
* Finally search the shared pool and try employ foreign carriers
*/
- sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ sentinel = allctr->cpool.sentinel;
if (cpool_entrance) {
/*
* We saw a pooled carrier above; use it as the entrance into the pool
*/
- cpdp = cpool_entrance;
}
else {
/*
- * No pooled carried seen above. Start search at cpool sentinel,
+ * No pooled carrier seen above. Start search at cpool sentinel,
* but begin by passing one element before trying to fetch.
* This is to avoid contention with threads inserting elements.
*/
- cpool_entrance = sentinel;
- cpdp = cpool_aint2cpd(cpool_read(&cpool_entrance->prev));
- if (cpdp == sentinel)
+ cpool_entrance = cpool_aint2cpd(cpool_read(&sentinel->prev));
+ if (cpool_entrance == sentinel)
goto check_dc_list;
}
- loop_state = IGNORANT;
+ cpdp = cpool_entrance;
+ seen_sentinel = 0;
do {
erts_aint_t exp;
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
- if (cpdp == cpool_entrance) {
- if (cpool_entrance == sentinel) {
- cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
- if (cpdp == sentinel)
- break;
- }
- loop_state = THE_LAST_ONE;
- }
- else if (cpdp == sentinel) {
- if (loop_state == HAS_SEEN_SENTINEL) {
+ if (cpdp == sentinel) {
+ if (seen_sentinel) {
/* We have been here before; cpool_entrance must have been removed */
INC_CC(allctr->cpool.stat.entrance_removed);
break;
}
- cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
- if (cpdp == sentinel)
- break;
- loop_state = HAS_SEEN_SENTINEL;
+ seen_sentinel = 1;
+ continue;
}
+ ASSERT(cpdp != cpool_entrance || seen_sentinel);
+
crr = ErtsContainerStruct(cpdp, Carrier_t, cpool);
exp = erts_atomic_read_rb(&crr->allctr);
@@ -3489,7 +3734,7 @@ cpool_fetch(Allctr_t *allctr, UWord size)
INC_CC(allctr->cpool.stat.fail_shared);
return NULL;
}
- }while (loop_state != THE_LAST_ONE);
+ } while (cpdp != cpool_entrance);
check_dc_list:
/* Last; check our own pending dealloc carrier list... */
@@ -3668,8 +3913,9 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
crr->cpool.orig_allctr = allctr;
crr->cpool.thr_prgr = ERTS_THR_PRGR_INVALID;
erts_atomic_init_nob(&crr->cpool.max_size, 0);
- crr->cpool.blocks = 0;
- crr->cpool.blocks_size = 0;
+ sys_memset(&crr->cpool.blocks_size, 0, sizeof(crr->cpool.blocks_size));
+ sys_memset(&crr->cpool.blocks, 0, sizeof(crr->cpool.blocks));
+ crr->cpool.total_blocks_size = 0;
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
crr->cpool.abandon_limit = 0;
else {
@@ -3684,14 +3930,14 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
crr->cpool.state = ERTS_MBC_IS_HOME;
}
-static void
-set_new_allctr_abandon_limit(Allctr_t *allctr)
+
+
+static UWord
+allctr_abandon_limit(Allctr_t *allctr)
{
UWord limit;
UWord csz;
- allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;
-
csz = allctr->mbcs.curr.norm.mseg.size;
csz += allctr->mbcs.curr.norm.sys_alloc.size;
@@ -3701,7 +3947,13 @@ set_new_allctr_abandon_limit(Allctr_t *allctr)
else
limit = (csz/100)*allctr->cpool.util_limit;
- allctr->cpool.abandon_limit = limit;
+ return limit;
+}
+
+static void ERTS_INLINE
+set_new_allctr_abandon_limit(Allctr_t *allctr)
+{
+ allctr->cpool.abandon_limit = allctr_abandon_limit(allctr);
}
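As a worked example of the limit computation above (illustrative numbers,
taking the percentage branch shown in this hunk): with 8 MiB of main
multiblock carriers and acul set to 60,

    UWord csz   = 8u << 20;             /* 8 MiB of mbc carriers        */
    UWord acul  = 60;                   /* utilization limit in percent */
    UWord limit = (csz / 100) * acul;   /* 5033160 bytes, ~4.8 MiB      */

i.e. the threshold the allocator compares against its current block usage
when deciding whether carriers should be abandoned into the pool.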
static void
@@ -3713,7 +3965,9 @@ abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
unlink_carrier(&allctr->mbc_list, crr);
allctr->remove_mbc(allctr, crr);
- set_new_allctr_abandon_limit(allctr);
+
+ /* Mark our free blocks as unused and reclaimable to the OS. */
+ carrier_mem_discard_free_blocks(allctr, crr);
cpool_insert(allctr, crr);
@@ -3766,7 +4020,8 @@ poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr)
}
static void
-cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp)
+cpool_read_stat(Allctr_t *allctr, int alloc_no,
+ UWord *nocp, UWord *cszp, UWord *nobp, UWord *bszp)
{
int i;
UWord noc = 0, csz = 0, nob = 0, bsz = 0;
@@ -3786,10 +4041,10 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *
? erts_atomic_read_nob(&allctr->cpool.stat.carriers_size)
: 0);
tnob = (UWord) (nobp
- ? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks)
+ ? erts_atomic_read_nob(&allctr->cpool.stat.no_blocks[alloc_no])
: 0);
tbsz = (UWord) (bszp
- ? erts_atomic_read_nob(&allctr->cpool.stat.blocks_size)
+ ? erts_atomic_read_nob(&allctr->cpool.stat.blocks_size[alloc_no])
: 0);
if (tnoc == noc && tcsz == csz && tnob == nob && tbsz == bsz)
break;
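The surrounding loop is an optimistic, lock-free snapshot: each counter is
read, then read again on the next pass, and the values are only accepted once
two successive passes agree. A sketch of the pattern with two hypothetical
counters (the real loop additionally carries an `i` counter, presumably to
bound the number of retries):

    static void read_snapshot(erts_atomic_t *stat_a, erts_atomic_t *stat_b,
                              UWord *a_out, UWord *b_out)
    {
        UWord a = 0, b = 0;

        for (;;) {
            UWord ta = (UWord) erts_atomic_read_nob(stat_a);
            UWord tb = (UWord) erts_atomic_read_nob(stat_b);

            if (ta == a && tb == b)
                break;              /* two successive passes agree */

            a = ta;
            b = tb;
        }

        /* Note: this yields an approximate snapshot; it does not guard
         * against a value changing and changing back between passes. */
        *a_out = a;
        *b_out = b;
    }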
@@ -4044,6 +4299,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
#if HAVE_ERTS_MSEG
mbc_final_touch:
#endif
+ set_new_allctr_abandon_limit(allctr);
blk = MBC_TO_FIRST_BLK(allctr, crr);
@@ -4262,7 +4518,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
else {
ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
crr = FIRST_BLK_TO_MBC(allctr, blk);
- crr_sz = CARRIER_SZ(crr);
#ifdef DEBUG
if (!allctr->stopped) {
@@ -4294,15 +4549,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
else
{
unlink_carrier(&allctr->mbc_list, crr);
-#if HAVE_ERTS_MSEG
- if (IS_MSEG_CARRIER(crr)) {
- ASSERT(crr_sz % ERTS_SACRR_UNIT_SZ == 0);
- STAT_MSEG_MBC_FREE(allctr, crr_sz);
- }
- else
-#endif
- STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz);
-
+ STAT_MBC_FREE(allctr, crr);
if (allctr->remove_mbc)
allctr->remove_mbc(allctr, crr);
}
@@ -4316,7 +4563,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
LTTNG5(carrier_destroy,
ERTS_ALC_A2AD(allctr->alloc_no),
allctr->ix,
- crr_sz,
+ CARRIER_SZ(crr),
mbc_stats,
sbc_stats);
}
@@ -4394,6 +4641,8 @@ static struct {
Eterm blocks_size;
Eterm blocks;
+ Eterm foreign_blocks;
+
Eterm calls;
Eterm sys_alloc;
Eterm sys_free;
@@ -4494,6 +4743,7 @@ init_atoms(Allctr_t *allctr)
AM_INIT(carriers);
AM_INIT(blocks_size);
AM_INIT(blocks);
+ AM_INIT(foreign_blocks);
AM_INIT(calls);
AM_INIT(sys_alloc);
@@ -4629,7 +4879,6 @@ sz_info_fix(Allctr_t *allctr,
ErtsAlcFixList_t *fix = &allctr->fix[ix];
UWord alloced = fix->type_size * fix->u.cpool.allocated;
UWord used = fix->type_size * fix->u.cpool.used;
- ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
if (print_to_p) {
fmtfn_t to = *print_to_p;
@@ -4637,14 +4886,14 @@ sz_info_fix(Allctr_t *allctr,
erts_print(to,
arg,
"fix type internal: %s %bpu %bpu\n",
- (char *) ERTS_ALC_N2TD(n),
+ (char *) ERTS_ALC_T2TD(fix->type),
alloced,
used);
}
if (hpp || szp) {
add_3tup(hpp, szp, &res,
- alloc_type_atoms[n],
+ alloc_type_atoms[ERTS_ALC_T2N(fix->type)],
bld_unstable_uint(hpp, szp, alloced),
bld_unstable_uint(hpp, szp, used));
}
@@ -4657,7 +4906,6 @@ sz_info_fix(Allctr_t *allctr,
ErtsAlcFixList_t *fix = &allctr->fix[ix];
UWord alloced = fix->type_size * fix->u.nocpool.allocated;
UWord used = fix->type_size*fix->u.nocpool.used;
- ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
if (print_to_p) {
fmtfn_t to = *print_to_p;
@@ -4665,14 +4913,14 @@ sz_info_fix(Allctr_t *allctr,
erts_print(to,
arg,
"fix type: %s %bpu %bpu\n",
- (char *) ERTS_ALC_N2TD(n),
+ (char *) ERTS_ALC_T2TD(fix->type),
alloced,
used);
}
if (hpp || szp) {
add_3tup(hpp, szp, &res,
- alloc_type_atoms[n],
+ alloc_type_atoms[ERTS_ALC_T2N(fix->type)],
bld_unstable_uint(hpp, szp, alloced),
bld_unstable_uint(hpp, szp, used));
}
@@ -4745,9 +4993,9 @@ info_cpool(Allctr_t *allctr,
noc = csz = nob = bsz = ~0;
if (print_to_p || hpp) {
if (sz_only)
- cpool_read_stat(allctr, NULL, &csz, NULL, &bsz);
+ cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);
else
- cpool_read_stat(allctr, &noc, &csz, &nob, &bsz);
+ cpool_read_stat(allctr, allctr->alloc_no, &noc, &csz, &nob, &bsz);
}
if (print_to_p) {
@@ -4762,6 +5010,10 @@ info_cpool(Allctr_t *allctr,
}
if (hpp || szp) {
+ Eterm foreign_blocks;
+ int i;
+
+ foreign_blocks = NIL;
res = NIL;
if (!sz_only) {
@@ -4808,22 +5060,61 @@ info_cpool(Allctr_t *allctr,
add_3tup(hpp, szp, &res, am.entrance_removed,
bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)),
bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed)));
+ }
add_2tup(hpp, szp, &res,
am.carriers_size,
bld_unstable_uint(hpp, szp, csz));
- }
- if (!sz_only)
- add_2tup(hpp, szp, &res,
- am.carriers,
- bld_unstable_uint(hpp, szp, noc));
+
+ if (!sz_only) {
+ add_2tup(hpp, szp, &res,
+ am.carriers,
+ bld_unstable_uint(hpp, szp, noc));
+ }
+
add_2tup(hpp, szp, &res,
am.blocks_size,
bld_unstable_uint(hpp, szp, bsz));
- if (!sz_only)
+
+ if (!sz_only) {
add_2tup(hpp, szp, &res,
am.blocks,
bld_unstable_uint(hpp, szp, nob));
+ }
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ const char *name_str;
+ Eterm name, info;
+
+ if (i == allctr->alloc_no) {
+ continue;
+ }
+
+ cpool_read_stat(allctr, i, NULL, NULL, &nob, &bsz);
+
+ if (bsz == 0 && (nob == 0 || sz_only)) {
+ continue;
+ }
+
+ name_str = ERTS_ALC_A2AD(i);
+ info = NIL;
+
+ add_2tup(hpp, szp, &info,
+ am.blocks_size,
+ bld_unstable_uint(hpp, szp, bsz));
+
+ if (!sz_only) {
+ add_2tup(hpp, szp, &info,
+ am.blocks,
+ bld_unstable_uint(hpp, szp, nob));
+ }
+
+ name = am_atom_put(name_str, sys_strlen(name_str));
+
+ add_2tup(hpp, szp, &foreign_blocks, name, info);
+ }
+
+ add_2tup(hpp, szp, &res, am.foreign_blocks, foreign_blocks);
}
return res;
@@ -5459,6 +5750,19 @@ erts_alcu_info(Allctr_t *allctr,
return res;
}
+void
+erts_alcu_foreign_size(Allctr_t *allctr, ErtsAlcType_t alloc_no, AllctrSize_t *size)
+{
+ if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
+ UWord csz, bsz;
+ cpool_read_stat(allctr, alloc_no, NULL, &csz, NULL, &bsz);
+ size->carriers = csz;
+ size->blocks = bsz;
+ } else {
+ size->carriers = 0;
+ size->blocks = 0;
+ }
+}
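A hypothetical call site for the new API, assuming the alloc_no argument
selects which allocator type's blocks to report (this matches how
cpool_read_stat is indexed above); ERTS_ALC_A_BINARY is used purely as an
example:

    AllctrSize_t foreign;

    /* How much memory belonging to binary_alloc currently resides in
     * this instance's carriers? Both fields read 0 when the carrier
     * pool is disabled. */
    erts_alcu_foreign_size(allctr, ERTS_ALC_A_BINARY, &foreign);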
void
erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz)
@@ -5477,7 +5781,7 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
UWord csz, bsz;
- cpool_read_stat(allctr, NULL, &csz, NULL, &bsz);
+ cpool_read_stat(allctr, allctr->alloc_no, NULL, &csz, NULL, &bsz);
size->blocks += bsz;
size->carriers += csz;
}
@@ -5522,6 +5826,11 @@ do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
+ /* Reject sizes that can't fit into the header word. */
+ if (size > ~BLK_FLG_MASK) {
+ return NULL;
+ }
+
#if ALLOC_ZERO_EQ_NULL
if (!size)
return NULL;
@@ -5688,12 +5997,11 @@ do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (p) {
-
INC_CC(allctr->calls.this_free);
- if (allctr->fix) {
+ if (ERTS_ALC_IS_FIX_TYPE(type)) {
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
- fix_cpool_free(allctr, type, p, busy_pcrr_pp, 1);
+ fix_cpool_free(allctr, type, 0, p, busy_pcrr_pp);
else
fix_nocpool_free(allctr, type, p);
}
@@ -5702,7 +6010,7 @@ do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
if (IS_SBC_BLK(blk))
destroy_carrier(allctr, blk, NULL);
else
- mbc_free(allctr, p, busy_pcrr_pp);
+ mbc_free(allctr, type, p, busy_pcrr_pp);
}
}
}
@@ -5804,6 +6112,11 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
return res;
}
+ /* Reject sizes that can't fit into the header word. */
+ if (size > ~BLK_FLG_MASK) {
+ return NULL;
+ }
+
#if ALLOC_ZERO_EQ_NULL
if (!size) {
ASSERT(p);
@@ -5820,7 +6133,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
if (size < allctr->sbc_threshold) {
if (IS_MBC_BLK(blk))
- res = mbc_realloc(allctr, p, size, alcu_flgs, busy_pcrr_pp);
+ res = mbc_realloc(allctr, type, p, size, alcu_flgs, busy_pcrr_pp);
else {
Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size;
Uint crr_sz;
@@ -5879,7 +6192,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
sys_memcpy((void *) res,
(void *) p,
MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size));
- mbc_free(allctr, p, busy_pcrr_pp);
+ mbc_free(allctr, type, p, busy_pcrr_pp);
}
else
res = NULL;
@@ -6247,12 +6560,21 @@ int
erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
{
/* erts_alcu_start assumes that allctr has been zeroed */
+ int i;
if (((UWord)allctr & ERTS_CRR_ALCTR_FLG_MASK) != 0) {
erts_exit(ERTS_ABORT_EXIT, "%s:%d:erts_alcu_start: Alignment error\n",
__FILE__, __LINE__);
}
+ /* The various fields packed into the header word must not overlap */
+ ERTS_CT_ASSERT(!(MBC_ABLK_OFFSET_MASK & MBC_ABLK_SZ_MASK));
+ ERTS_CT_ASSERT(!(MBC_ABLK_OFFSET_MASK & BLK_FLG_MASK));
+ ERTS_CT_ASSERT(!(MBC_ABLK_SZ_MASK & BLK_FLG_MASK));
+ ERTS_CT_ASSERT(!(MBC_FBLK_SZ_MASK & BLK_FLG_MASK));
+ ERTS_CT_ASSERT(!(SBC_BLK_SZ_MASK & BLK_FLG_MASK));
+ ERTS_CT_ASSERT(!(CRR_SZ_MASK & CRR_FLG_MASK));
+
if (!initialized)
goto error;
@@ -6270,6 +6592,11 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->ix = init->ix;
allctr->alloc_no = init->alloc_no;
+ allctr->alloc_strat = init->alloc_strat;
+
+ ASSERT(allctr->alloc_no >= ERTS_ALC_A_MIN &&
+ allctr->alloc_no <= ERTS_ALC_A_MAX);
+
if (allctr->alloc_no < ERTS_ALC_A_MIN
|| ERTS_ALC_A_MAX < allctr->alloc_no)
allctr->alloc_no = ERTS_ALC_A_INVALID;
@@ -6322,8 +6649,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
+ sizeof(FreeBlkFtr_t));
if (init->tpref) {
Uint sz = ABLK_HDR_SZ;
- sz += (init->fix ?
- sizeof(ErtsAllctrFixDDBlock_t) : sizeof(ErtsAllctrDDBlock_t));
+ sz += sizeof(ErtsAllctrDDBlock_t);
sz = UNIT_CEILING(sz);
if (sz > allctr->min_block_size)
allctr->min_block_size = sz;
@@ -6334,15 +6660,29 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->cpool.dc_list.last = NULL;
allctr->cpool.abandon_limit = 0;
allctr->cpool.disable_abandon = 0;
- erts_atomic_init_nob(&allctr->cpool.stat.blocks_size, 0);
- erts_atomic_init_nob(&allctr->cpool.stat.no_blocks, 0);
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ erts_atomic_init_nob(&allctr->cpool.stat.blocks_size[i], 0);
+ erts_atomic_init_nob(&allctr->cpool.stat.no_blocks[i], 0);
+ }
erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0);
erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0);
- allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;
if (!init->ts && init->acul && init->acnl) {
+ ASSERT(allctr->add_mbc);
+ ASSERT(allctr->remove_mbc);
+ ASSERT(allctr->largest_fblk_in_mbc);
+ ASSERT(allctr->first_fblk_in_mbc);
+ ASSERT(allctr->next_fblk_in_mbc);
+
allctr->cpool.util_limit = init->acul;
allctr->cpool.in_pool_limit = init->acnl;
allctr->cpool.fblk_min_limit = init->acfml;
+
+ if (allctr->alloc_strat == ERTS_ALC_S_FIRSTFIT) {
+ allctr->cpool.sentinel = &firstfit_carrier_pool.sentinel;
+ }
+ else if (allctr->alloc_no != ERTS_ALC_A_TEST) {
+ ERTS_INTERNAL_ERROR("Impossible carrier migration config.");
+ }
}
else {
allctr->cpool.util_limit = 0;
@@ -6350,6 +6690,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->cpool.fblk_min_limit = 0;
}
+ /* The invasive tests don't really care whether the pool is enabled or not,
+ * so we need to set this unconditionally for this allocator type. */
+ if (allctr->alloc_no == ERTS_ALC_A_TEST) {
+ allctr->cpool.sentinel = &test_carrier_pool.sentinel;
+ }
+
allctr->sbc_threshold = adjust_sbct(allctr, init->sbct);
#if HAVE_ERTS_MSEG
@@ -6461,9 +6807,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->fix_shrink_scheduled = 0;
for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) {
allctr->fix[i].type_size = init->fix_type_size[i];
+ allctr->fix[i].type = ERTS_ALC_N2T(i + ERTS_ALC_N_MIN_A_FIXED_SIZE);
allctr->fix[i].list_size = 0;
allctr->fix[i].list = NULL;
- ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t));
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
allctr->fix[i].u.cpool.min_list_size = 0;
allctr->fix[i].u.cpool.shrink_list = 0;
@@ -6512,12 +6858,16 @@ erts_alcu_stop(Allctr_t *allctr)
void
erts_alcu_init(AlcUInit_t *init)
{
- int i;
- for (i = 0; i <= ERTS_ALC_A_MAX; i++) {
- ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel;
- erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
- erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
- }
+ ErtsAlcCPoolData_t *sentinel;
+
+ sentinel = &firstfit_carrier_pool.sentinel;
+ erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
+ erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
+
+ sentinel = &test_carrier_pool.sentinel;
+ erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
+ erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
+
ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
@@ -6528,6 +6878,8 @@ erts_alcu_init(AlcUInit_t *init)
#endif
allow_sys_alloc_carriers = init->sac;
+ sys_page_size = erts_sys_get_page_size();
+
#ifdef DEBUG
carrier_alignment = sizeof(Unit_t);
#endif
@@ -6699,7 +7051,7 @@ static int blockscan_cpool_yielding(blockscan_t *state)
{
ErtsAlcCPoolData_t *sentinel, *cursor;
- sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel;
+ sentinel = (state->allocator)->cpool.sentinel;
cursor = blockscan_restore_cpool_cursor(state);
if (ERTS_PROC_IS_EXITING(state->process)) {
@@ -6831,11 +7183,8 @@ static int blockscan_sweep_mbcs(blockscan_t *state)
static int blockscan_sweep_cpool(blockscan_t *state)
{
if (state->current_op != blockscan_sweep_cpool) {
- ErtsAlcCPoolData_t *sentinel;
-
SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
- sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel;
- state->cpool_cursor = sentinel;
+ state->cpool_cursor = (state->allocator)->cpool.sentinel;
}
state->current_op = blockscan_sweep_cpool;
@@ -7119,11 +7468,14 @@ static int gather_ahist_scan(Allctr_t *allocator,
alcu_atag_t tag;
block = SBC2BLK(allocator, carrier);
- tag = GET_BLK_ATAG(block);
- ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+ if (BLK_HAS_ATAG(block)) {
+ tag = GET_BLK_ATAG(block);
+
+ ASSERT(DBG_IS_VALID_ATAG(tag));
- gather_ahist_update(state, tag, SBC_BLK_SZ(block));
+ gather_ahist_update(state, tag, SBC_BLK_SZ(block));
+ }
} else {
UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
@@ -7134,10 +7486,10 @@ static int gather_ahist_scan(Allctr_t *allocator,
while (1) {
UWord block_size = MBC_BLK_SZ(block);
- if (IS_ALLOCED_BLK(block)) {
+ if (IS_ALLOCED_BLK(block) && BLK_HAS_ATAG(block)) {
alcu_atag_t tag = GET_BLK_ATAG(block);
- ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+ ASSERT(DBG_IS_VALID_ATAG(tag));
gather_ahist_update(state, tag, block_size);
}
@@ -7159,7 +7511,7 @@ static int gather_ahist_scan(Allctr_t *allocator,
return blocks_scanned;
}
-static void gather_ahist_append_result(hist_tree_t *node, void *arg)
+static int gather_ahist_append_result(hist_tree_t *node, void *arg, Sint reds)
{
gather_ahist_t *state = (gather_ahist_t*)arg;
@@ -7193,6 +7545,7 @@ static void gather_ahist_append_result(hist_tree_t *node, void *arg)
/* Plain free is intentional. */
free(node);
+ return 1;
}
static void gather_ahist_send(gather_ahist_t *state)
@@ -7251,11 +7604,11 @@ static int gather_ahist_finish(void *arg)
state->building_result = 1;
}
- if (hist_tree_rbt_foreach_destroy_yielding(&state->hist_tree,
- &gather_ahist_append_result,
- state,
- &state->hist_tree_yield,
- BLOCKSCAN_REDUCTIONS)) {
+ if (!hist_tree_rbt_foreach_destroy_yielding(&state->hist_tree,
+ &gather_ahist_append_result,
+ state,
+ &state->hist_tree_yield,
+ BLOCKSCAN_REDUCTIONS)) {
return 1;
}
@@ -7264,10 +7617,11 @@ static int gather_ahist_finish(void *arg)
return 0;
}
-static void gather_ahist_destroy_result(hist_tree_t *node, void *arg)
+static int gather_ahist_destroy_result(hist_tree_t *node, void *arg, Sint reds)
{
(void)arg;
free(node);
+ return 1;
}
static void gather_ahist_abort(void *arg)
@@ -7297,8 +7651,6 @@ int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num,
sched_id,
&allocator)) {
return 0;
- } else if (!allocator->atags) {
- return 0;
}
ensure_atoms_initialized(allocator);
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index f26ace1534..b46b311c59 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -24,6 +24,7 @@
#define ERTS_ALCU_VSN_STR "3.0"
#include "erl_alloc_types.h"
+#include "erl_alloc.h"
#define ERL_THREADS_EMU_INTERNAL__
#include "erl_threads.h"
@@ -44,6 +45,7 @@ typedef struct {
typedef struct {
char *name_prefix;
ErtsAlcType_t alloc_no;
+ ErtsAlcStrat_t alloc_strat;
int force;
int ix;
int ts;
@@ -101,6 +103,7 @@ typedef struct {
#define ERTS_DEFAULT_ALLCTR_INIT { \
NULL, \
ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
+ ERTS_ALC_S_INVALID, /* (number) alloc_strat: allocator strategy */\
0, /* (bool) force: force enabled */\
0, /* (number) ix: instance index */\
1, /* (bool) ts: thread safe */\
@@ -138,6 +141,7 @@ typedef struct {
#define ERTS_DEFAULT_ALLCTR_INIT { \
NULL, \
ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
+ ERTS_ALC_S_INVALID, /* (number) alloc_strat: allocator strategy */\
0, /* (bool) force: force enabled */\
0, /* (number) ix: instance index */\
1, /* (bool) ts: thread safe */\
@@ -188,6 +192,7 @@ Eterm erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);
void erts_alcu_init(AlcUInit_t *);
void erts_alcu_current_size(Allctr_t *, AllctrSize_t *,
ErtsAlcUFixInfo_t *, int);
+void erts_alcu_foreign_size(Allctr_t *, ErtsAlcType_t, AllctrSize_t *);
void erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *);
erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
@@ -286,10 +291,18 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
#define UNIT_FLOOR(X) ((X) & UNIT_MASK)
#define UNIT_CEILING(X) UNIT_FLOOR((X) + INV_UNIT_MASK)
-#define FLG_MASK INV_UNIT_MASK
-#define SBC_BLK_SZ_MASK UNIT_MASK
-#define MBC_FBLK_SZ_MASK UNIT_MASK
-#define CARRIER_SZ_MASK UNIT_MASK
+/* We store flags in the bits that no one will ever use. Generally these are
+ * the bits below the alignment size, but for blocks we also steal the highest
+ * bit, since the header is a size and no one can reasonably expect to
+ * allocate objects that large. */
+#define HIGHEST_WORD_BIT (((UWord) 1) << (sizeof(UWord) * CHAR_BIT - 1))
+
+#define BLK_FLG_MASK (INV_UNIT_MASK | HIGHEST_WORD_BIT)
+#define SBC_BLK_SZ_MASK (~BLK_FLG_MASK)
+#define MBC_FBLK_SZ_MASK (~BLK_FLG_MASK)
+
+#define CRR_FLG_MASK INV_UNIT_MASK
+#define CRR_SZ_MASK UNIT_MASK
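Concretely, on a 64-bit build with 8-byte units (illustrative values, not the
build-time constants), the new layout looks like this:

    #include <assert.h>
    #include <stdint.h>

    #define EX_INV_UNIT_MASK UINT64_C(0x0000000000000007) /* low flag bits */
    #define EX_TOP_BIT       (UINT64_C(1) << 63)          /* ATAG flag     */
    #define EX_BLK_FLG_MASK  (EX_INV_UNIT_MASK | EX_TOP_BIT)

    /* Every bit between the low flags and the top bit carries size. */
    static_assert(~EX_BLK_FLG_MASK == UINT64_C(0x7FFFFFFFFFFFFFF8),
                  "block size field is bits 3..62");

This is also why do_erts_alcu_alloc() and do_erts_alcu_realloc() now reject
sizes above ~BLK_FLG_MASK: anything larger could not be represented in the
header word.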
#if ERTS_HAVE_MSEG_SUPER_ALIGNED \
|| (!HAVE_ERTS_MSEG && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC)
@@ -299,9 +312,9 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
# define ERTS_SUPER_ALIGN_BITS 18
# endif
# ifdef ARCH_64
-# define MBC_ABLK_OFFSET_BITS 24
+# define MBC_ABLK_OFFSET_BITS 23
# else
-# define MBC_ABLK_OFFSET_BITS 9
+# define MBC_ABLK_OFFSET_BITS 8
/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
# endif
# define ERTS_SACRR_UNIT_SHIFT ERTS_SUPER_ALIGN_BITS
@@ -321,19 +334,21 @@ void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
#endif
#if MBC_ABLK_OFFSET_BITS
-# define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS)
-# define MBC_ABLK_OFFSET_MASK (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT)
-# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK)
+/* The shift is reduced by 1 since the highest bit is used for a flag. */
+# define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - 1 - MBC_ABLK_OFFSET_BITS)
+# define MBC_ABLK_OFFSET_MASK \
+ (((UWORD_CONSTANT(1) << MBC_ABLK_OFFSET_BITS) - UWORD_CONSTANT(1)) \
+ << MBC_ABLK_OFFSET_SHIFT)
+# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~BLK_FLG_MASK)
#else
-# define MBC_ABLK_SZ_MASK (~FLG_MASK)
+# define MBC_ABLK_SZ_MASK (~BLK_FLG_MASK)
#endif
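As a worked example of the offset arithmetic (64-bit super-aligned build,
illustrative): with MBC_ABLK_OFFSET_BITS reduced to 23, the shift becomes
64 - 1 - 23 = 40, so the offset field occupies bits 40..62 and bit 63 stays
free for the allocation-tag flag:

    #include <stdint.h>

    #define EX_OFFSET_BITS  23
    #define EX_OFFSET_SHIFT (64 - 1 - EX_OFFSET_BITS)            /* 40 */
    #define EX_OFFSET_MASK \
        ((((uint64_t) 1 << EX_OFFSET_BITS) - 1) << EX_OFFSET_SHIFT)

    /* EX_OFFSET_MASK == 0x7FFFFF0000000000: bits 40..62, top bit clear. */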
#define MBC_ABLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)
#define MBC_FBLK_SZ(B) (ASSERT(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)
#define SBC_BLK_SZ(B) (ASSERT(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK)
-#define CARRIER_SZ(C) \
- ((C)->chdr & CARRIER_SZ_MASK)
+#define CARRIER_SZ(C) ((C)->chdr & CRR_SZ_MASK)
typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
@@ -351,12 +366,20 @@ typedef struct {
#endif
} Block_t;
-typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
+typedef struct ErtsAllctrDDBlock__ {
+ union {
+ struct ErtsAllctrDDBlock__ *ptr_next;
+ erts_atomic_t atmc_next;
+ } u;
+ ErtsAlcType_t type;
+ Uint32 flags;
+} ErtsAllctrDDBlock_t;
-union ErtsAllctrDDBlock_t_ {
- erts_atomic_t atmc_next;
- ErtsAllctrDDBlock_t *ptr_next;
-};
+/* Deallocation was caused by shrinking a fix-list, so usage statistics have
+ * already been updated. */
+#define DEALLOC_FLG_FIX_SHRINK (1 << 0)
+/* Deallocation was redirected to another instance. */
+#define DEALLOC_FLG_REDIRECTED (1 << 1)
typedef struct {
Block_t blk;
@@ -365,11 +388,10 @@ typedef struct {
#endif
} ErtsFakeDDBlock_t;
-
-
#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
+#define ATAG_BLK_HDR_FLG HIGHEST_WORD_BIT
#define SBC_BLK_HDR_FLG /* Special flag combo for (allocated) SBC blocks */\
(THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
@@ -381,9 +403,9 @@ typedef struct {
#define HOMECOMING_MBC_BLK_HDR (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
#define IS_FREE_LAST_MBC_BLK(B) \
- (((B)->bhdr & FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG))
+ (((B)->bhdr & BLK_FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG))
-#define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG)
+#define IS_SBC_BLK(B) (((B)->bhdr & SBC_BLK_HDR_FLG) == SBC_BLK_HDR_FLG)
#define IS_MBC_BLK(B) (!IS_SBC_BLK((B)))
#define IS_FREE_BLK(B) (ASSERT(IS_MBC_BLK(B)), \
(B)->bhdr & THIS_FREE_BLK_HDR_FLG)
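The change to IS_SBC_BLK is subtle but necessary: BLK_FLG_MASK now includes
the allocation-tag bit, so comparing (bhdr & BLK_FLG_MASK) against
SBC_BLK_HDR_FLG would wrongly fail for a tagged SBC block. Testing only the
combo bits keeps tagged blocks recognizable; a small sketch using the names
defined in this header:

    /* An SBC block that also carries an allocation tag sets both the
     * SBC combo bits and the top (ATAG) bit. */
    UWord bhdr = ATAG_BLK_HDR_FLG | SBC_BLK_HDR_FLG;

    ASSERT((bhdr & SBC_BLK_HDR_FLG) == SBC_BLK_HDR_FLG); /* new test: SBC  */
    ASSERT((bhdr & BLK_FLG_MASK)    != SBC_BLK_HDR_FLG); /* old test fails */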
@@ -394,7 +416,8 @@ typedef struct {
# define ABLK_TO_MBC(B) \
(ASSERT(IS_MBC_BLK(B) && !IS_FREE_BLK(B)), \
(Carrier_t*)((ERTS_SACRR_UNIT_FLOOR((UWord)(B)) - \
- (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << ERTS_SACRR_UNIT_SHIFT))))
+ ((((B)->bhdr & ~BLK_FLG_MASK) >> MBC_ABLK_OFFSET_SHIFT) \
+ << ERTS_SACRR_UNIT_SHIFT))))
# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B))
#else
# define FBLK_TO_MBC(B) ((B)->carrier)
@@ -433,8 +456,9 @@ typedef struct {
ErtsThrPrgrVal thr_prgr;
erts_atomic_t max_size;
UWord abandon_limit;
- UWord blocks;
- UWord blocks_size;
+ UWord blocks[ERTS_ALC_A_MAX + 1];
+ UWord blocks_size[ERTS_ALC_A_MAX + 1];
+ UWord total_blocks_size;
enum {
ERTS_MBC_IS_HOME,
ERTS_MBC_WAS_POOLED,
@@ -452,7 +476,7 @@ struct Carrier_t_ {
};
#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
- ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
+ ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~CRR_FLG_MASK))
typedef struct {
Carrier_t *first;
@@ -530,7 +554,6 @@ typedef struct {
} head;
} ErtsAllctrDDQueue_t;
-
typedef struct {
size_t type_size;
SWord list_size;
@@ -549,6 +572,7 @@ typedef struct {
UWord used;
} cpool;
} u;
+ ErtsAlcType_t type;
} ErtsAlcFixList_t;
struct Allctr_t_ {
@@ -569,6 +593,9 @@ struct Allctr_t_ {
/* Allocator number */
ErtsAlcType_t alloc_no;
+ /* Allocator strategy */
+ ErtsAlcStrat_t alloc_strat;
+
/* Instance index */
int ix;
@@ -617,6 +644,9 @@ struct Allctr_t_ {
AOFF_RBTree_t* pooled_tree;
CarrierList_t dc_list;
+    /* The sentinel of the cpool we're attached to */
+ ErtsAlcCPoolData_t *sentinel;
+
UWord abandon_limit;
int disable_abandon;
int check_limit_count;
@@ -624,8 +654,8 @@ struct Allctr_t_ {
UWord in_pool_limit; /* acnl */
UWord fblk_min_limit; /* acmfl */
struct {
- erts_atomic_t blocks_size;
- erts_atomic_t no_blocks;
+ erts_atomic_t blocks_size[ERTS_ALC_A_MAX + 1];
+ erts_atomic_t no_blocks[ERTS_ALC_A_MAX + 1];
erts_atomic_t carriers_size;
erts_atomic_t no_carriers;
CallCounter_t fail_pooled;
@@ -657,10 +687,12 @@ struct Allctr_t_ {
void (*creating_mbc) (Allctr_t *, Carrier_t *);
void (*destroying_mbc) (Allctr_t *, Carrier_t *);
- /* The three callbacks below are needed to support carrier migration */
+ /* The five callbacks below are needed to support carrier migration. */
void (*add_mbc) (Allctr_t *, Carrier_t *);
void (*remove_mbc) (Allctr_t *, Carrier_t *);
UWord (*largest_fblk_in_mbc) (Allctr_t *, Carrier_t *);
+ Block_t * (*first_fblk_in_mbc) (Allctr_t *, Carrier_t *);
+ Block_t * (*next_fblk_in_mbc) (Allctr_t *, Carrier_t *, Block_t *);
#if HAVE_ERTS_MSEG
void* (*mseg_alloc)(Allctr_t*, Uint *size_p, Uint flags);
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 917cb1cf10..c19d6d1b1e 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -107,9 +107,11 @@ typedef struct AOFF_Carrier_t_ AOFF_Carrier_t;
struct AOFF_Carrier_t_ {
Carrier_t crr;
- AOFF_RBTree_t rbt_node; /* My node in the carrier tree */
- AOFF_RBTree_t* root; /* Root of my block tree */
+ AOFF_RBTree_t rbt_node; /* My node in the carrier tree */
+ AOFF_RBTree_t* root; /* Root of my block tree */
+ enum AOFFSortOrder blk_order;
};
+
#define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node)
/*
@@ -239,6 +241,9 @@ static void aoff_add_mbc(Allctr_t*, Carrier_t*);
static void aoff_remove_mbc(Allctr_t*, Carrier_t*);
static UWord aoff_largest_fblk_in_mbc(Allctr_t*, Carrier_t*);
+static Block_t *aoff_first_fblk_in_mbc(Allctr_t *, Carrier_t *);
+static Block_t *aoff_next_fblk_in_mbc(Allctr_t *, Carrier_t *, Block_t *);
+
/* Generic tree functions used by both carrier and block trees. */
static void rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del);
static void rbt_insert(enum AOFFSortOrder, AOFF_RBTree_t** root, AOFF_RBTree_t* blk);
@@ -281,15 +286,28 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t));
+ if (aoffinit->blk_order == FF_CHAOS) {
+ const enum AOFFSortOrder orders[3] = {FF_AOFF, FF_AOBF, FF_BF};
+ int index = init->ix % (sizeof(orders) / sizeof(orders[0]));
+
+ ASSERT(init->alloc_no == ERTS_ALC_A_TEST);
+ aoffinit->blk_order = orders[index];
+ }
+
+ if (aoffinit->crr_order == FF_CHAOS) {
+ const enum AOFFSortOrder orders[2] = {FF_AGEFF, FF_AOFF};
+ int index = init->ix % (sizeof(orders) / sizeof(orders[0]));
+
+ ASSERT(init->alloc_no == ERTS_ALC_A_TEST);
+ aoffinit->crr_order = orders[index];
+ }
+
alc->blk_order = aoffinit->blk_order;
alc->crr_order = aoffinit->crr_order;
allctr->mbc_header_size = sizeof(AOFF_Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
- allctr->min_block_size = (aoffinit->blk_order == FF_BF
- ? (offsetof(AOFF_RBTree_t, u.next)
- + ErtsSizeofMember(AOFF_RBTree_t, u.next))
- : offsetof(AOFF_RBTree_t, u));
+ allctr->min_block_size = sizeof(AOFF_RBTree_t);
allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR;
@@ -311,6 +329,8 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
allctr->add_mbc = aoff_add_mbc;
allctr->remove_mbc = aoff_remove_mbc;
allctr->largest_fblk_in_mbc = aoff_largest_fblk_in_mbc;
+ allctr->first_fblk_in_mbc = aoff_first_fblk_in_mbc;
+ allctr->next_fblk_in_mbc = aoff_next_fblk_in_mbc;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -512,14 +532,15 @@ tree_insert_fixup(AOFF_RBTree_t** root, AOFF_RBTree_t *blk)
static void
aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)
{
- AOFFAllctr_t* alc = (AOFFAllctr_t*)allctr;
AOFF_RBTree_t* del = (AOFF_RBTree_t*)blk;
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) FBLK_TO_MBC(&del->hdr);
+ (void)allctr;
+
ASSERT(crr->rbt_node.hdr.bhdr == crr->root->max_sz);
- HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
+ HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0);
- if (alc->blk_order == FF_BF) {
+ if (crr->blk_order == FF_BF) {
ASSERT(del->flags & IS_BF_FLG);
if (IS_LIST_ELEM(del)) {
/* Remove from list */
@@ -534,20 +555,20 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)
}
else if (AOFF_LIST_NEXT(del)) {
/* Replace tree node by next element in list... */
-
+
ASSERT(AOFF_BLK_SZ(AOFF_LIST_NEXT(del)) == AOFF_BLK_SZ(del));
ASSERT(IS_LIST_ELEM(AOFF_LIST_NEXT(del)));
replace(&crr->root, (AOFF_RBTree_t*)del, AOFF_LIST_NEXT(del));
- HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
+ HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0);
return;
}
}
rbt_delete(&crr->root, (AOFF_RBTree_t*)del);
- HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
+ HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, 0);
/* Update the carrier tree with a potentially new (lower) max_sz
*/
@@ -737,17 +758,18 @@ rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del)
static void
aoff_link_free_block(Allctr_t *allctr, Block_t *block)
{
- AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr;
AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block;
AOFF_RBTree_t *crr_node;
AOFF_Carrier_t *blk_crr = (AOFF_Carrier_t*) FBLK_TO_MBC(block);
Uint blk_sz = AOFF_BLK_SZ(blk);
+ (void)allctr;
+
ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(&blk_crr->crr));
ASSERT(blk_crr->rbt_node.hdr.bhdr == (blk_crr->root ? blk_crr->root->max_sz : 0));
- HARD_CHECK_TREE(&blk_crr->crr, alc->blk_order, blk_crr->root, 0);
+ HARD_CHECK_TREE(&blk_crr->crr, blk_crr->blk_order, blk_crr->root, 0);
- rbt_insert(alc->blk_order, &blk_crr->root, blk);
+ rbt_insert(blk_crr->blk_order, &blk_crr->root, blk);
/*
* Update carrier tree with a potentially new (larger) max_sz
@@ -891,7 +913,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
/* Get block within carrier tree
*/
#ifdef HARD_DEBUG
- dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, size);
+ dbg_blk = HARD_CHECK_TREE(&crr->crr, crr->blk_order, crr->root, size);
#endif
blk = rbt_search(crr->root, size);
@@ -904,7 +926,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
if (!blk)
return NULL;
- if (cand_blk && cmp_cand_blk(alc->blk_order, cand_blk, blk) < 0) {
+ if (cand_blk && cmp_cand_blk(crr->blk_order, cand_blk, blk) < 0) {
return NULL; /* cand_blk was better */
}
@@ -927,21 +949,28 @@ static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier)
AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
AOFF_RBTree_t **root = &alc->mbc_root;
+ Sint64 bt = get_birth_time();
HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
crr->rbt_node.hdr.bhdr = 0;
- if (alc->crr_order == FF_AGEFF || IS_DEBUG) {
- Sint64 bt = get_birth_time();
- crr->rbt_node.u.birth_time = bt;
- crr->crr.cpool.pooled.u.birth_time = bt;
- }
+
+    /* While birth time is only used by FF_AGEFF, we have to set it for all
+     * sort orders, as the carrier can be migrated to an instance that uses
+     * it and we don't want to upset that instance's ordering. */
+ crr->rbt_node.u.birth_time = bt;
+ crr->crr.cpool.pooled.u.birth_time = bt;
+
rbt_insert(alc->crr_order, root, &crr->rbt_node);
/* aoff_link_free_block will add free block later */
crr->root = NULL;
HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
+
+ /* When a carrier has been migrated, its block order may differ from that
+ * of the allocator it's been migrated to. */
+ crr->blk_order = alc->blk_order;
}
#define IS_CRR_IN_TREE(CRR,ROOT) \
@@ -1034,6 +1063,62 @@ static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier)
return crr->rbt_node.hdr.bhdr;
}
+static Block_t *aoff_first_fblk_in_mbc(Allctr_t *allctr, Carrier_t *carrier)
+{
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*)carrier;
+
+ (void)allctr;
+
+ if (crr->root) {
+ AOFF_RBTree_t *blk;
+
+ /* Descend to the rightmost block of the tree. */
+ for (blk = crr->root; blk->right; blk = blk->right);
+
+ return (Block_t*)blk;
+ }
+
+ return NULL;
+}
+
+static Block_t *aoff_next_fblk_in_mbc(Allctr_t *allctr, Carrier_t *carrier,
+ Block_t *block)
+{
+ AOFF_RBTree_t *parent, *blk;
+
+ (void)allctr;
+ (void)carrier;
+
+ blk = (AOFF_RBTree_t*)block;
+
+ if (blk->left) {
+ /* Descend to the rightmost block of the left subtree. */
+ for (blk = blk->left; blk->right; blk = blk->right);
+
+ return (Block_t*)blk;
+ }
+
+ while (blk->parent) {
+ parent = blk->parent;
+
+ /* If we ascend from the right we know we haven't visited our parent
+ * yet, because we always descend as far as we can to the right when
+ * entering a subtree. */
+ if (parent->right == blk) {
+ ASSERT(parent->left != blk);
+ return (Block_t*)parent;
+ }
+
+ /* If we ascend from the left we know we've already visited our
+ * parent, and will need to keep ascending until we do so from the
+ * right or reach the end of the tree. */
+ ASSERT(parent->left == blk);
+ blk = parent;
+ }
+
+ return NULL;
+}
+
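Together, the two callbacks give allocator-independent iteration over a
carrier's free blocks, which is presumably what
carrier_mem_discard_free_blocks() in erl_alloc_util.c relies on when a
carrier is abandoned. A minimal sketch of a caller (hypothetical helper,
using only the callback fields added to Allctr_t in this commit):

    /* Visit every free block in `crr` via the new callbacks. */
    static void for_each_free_block(Allctr_t *allctr, Carrier_t *crr,
                                    void (*fn)(Block_t *))
    {
        Block_t *blk = allctr->first_fblk_in_mbc(allctr, crr);

        while (blk) {
            fn(blk);
            blk = allctr->next_fblk_in_mbc(allctr, crr, blk);
        }
    }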
/*
* info_options()
*/
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
index 68df9e0a49..9c9b98da86 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.h
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -32,7 +32,12 @@ enum AOFFSortOrder {
FF_AGEFF = 0, /* carrier trees only */
FF_AOFF = 1,
FF_AOBF = 2, /* block trees only */
- FF_BF = 3 /* block trees only */
+ FF_BF = 3, /* block trees only */
+
+    FF_CHAOS = -1  /* A test-specific sort order that picks one of the above
+                    * based on the instance id. Used to test that carriers
+                    * created under one order keep working after being
+                    * migrated to another. */
};
typedef struct {
diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c
index 144fb56ea5..68d1cd989e 100644
--- a/erts/emulator/beam/erl_arith.c
+++ b/erts/emulator/beam/erl_arith.c
@@ -52,19 +52,11 @@ static ERTS_INLINE void maybe_shrink(Process* p, Eterm* hp, Eterm res, Uint allo
Uint actual;
if (is_immed(res)) {
- if (p->heap <= hp && hp < p->htop) {
- p->htop = hp;
- }
- else {
- erts_heap_frag_shrink(p, hp);
- }
+ ASSERT(!(p->heap <= hp && hp < p->htop));
+ erts_heap_frag_shrink(p, hp);
} else if ((actual = bignum_header_arity(*hp)+1) < alloc) {
- if (p->heap <= hp && hp < p->htop) {
- p->htop = hp+actual;
- }
- else {
- erts_heap_frag_shrink(p, hp+actual);
- }
+ ASSERT(!(p->heap <= hp && hp < p->htop));
+ erts_heap_frag_shrink(p, hp+actual);
}
}
@@ -246,7 +238,7 @@ shift(Process* p, Eterm arg1, Eterm arg2, int right)
BIF_ERROR(p, SYSTEM_LIMIT);
}
need = BIG_NEED_SIZE(ires+1);
- bigp = HAlloc(p, need);
+ bigp = HeapFragOnlyAlloc(p, need);
arg1 = big_lshift(arg1, i, bigp);
maybe_shrink(p, bigp, arg1, need);
if (is_nil(arg1)) {
@@ -298,7 +290,7 @@ BIF_RETTYPE bnot_1(BIF_ALIST_1)
ret = make_small(~signed_val(BIF_ARG_1));
} else if (is_big(BIF_ARG_1)) {
Uint need = BIG_NEED_SIZE(big_size(BIF_ARG_1)+1);
- Eterm* bigp = HAlloc(BIF_P, need);
+ Eterm* bigp = HeapFragOnlyAlloc(BIF_P, need);
ret = big_bnot(BIF_ARG_1, bigp);
maybe_shrink(BIF_P, bigp, ret, need);
@@ -343,7 +335,7 @@ erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
- hp = HAlloc(p, 2);
+ hp = HeapFragOnlyAlloc(p, 2);
res = small_to_big(ires, hp);
return res;
}
@@ -400,7 +392,7 @@ erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
sz2 = big_size(arg2);
sz = MAX(sz1, sz2)+1;
need_heap = BIG_NEED_SIZE(sz);
- hp = HAlloc(p, need_heap);
+ hp = HeapFragOnlyAlloc(p, need_heap);
res = big_plus(arg1, arg2, hp);
maybe_shrink(p, hp, res, need_heap);
if (is_nil(res)) {
@@ -446,7 +438,7 @@ erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
do_float:
f1.fd = f1.fd + f2.fd;
ERTS_FP_ERROR(p, f1.fd, goto badarith);
- hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ hp = HeapFragOnlyAlloc(p, FLOAT_SIZE_OBJECT);
res = make_float(hp);
PUT_DOUBLE(f1, hp);
return res;
@@ -488,7 +480,7 @@ erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
- hp = HAlloc(p, 2);
+ hp = HeapFragOnlyAlloc(p, 2);
res = small_to_big(ires, hp);
return res;
}
@@ -534,7 +526,7 @@ erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
sz2 = big_size(arg2);
sz = MAX(sz1, sz2)+1;
need_heap = BIG_NEED_SIZE(sz);
- hp = HAlloc(p, need_heap);
+ hp = HeapFragOnlyAlloc(p, need_heap);
res = big_minus(arg1, arg2, hp);
maybe_shrink(p, hp, res, need_heap);
if (is_nil(res)) {
@@ -589,7 +581,7 @@ erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
do_float:
f1.fd = f1.fd - f2.fd;
ERTS_FP_ERROR(p, f1.fd, goto badarith);
- hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ hp = HeapFragOnlyAlloc(p, FLOAT_SIZE_OBJECT);
res = make_float(hp);
PUT_DOUBLE(f1, hp);
return res;
@@ -657,7 +649,7 @@ erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
hdr = big_res[0];
arity = bignum_header_arity(hdr);
ASSERT(arity == 1 || arity == 2);
- hp = HAlloc(p, arity+1);
+ hp = HeapFragOnlyAlloc(p, arity+1);
res = make_big(hp);
*hp++ = hdr;
*hp++ = big_res[1];
@@ -726,7 +718,7 @@ erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
do_big:
need_heap = BIG_NEED_SIZE(sz);
- hp = HAlloc(p, need_heap);
+ hp = HeapFragOnlyAlloc(p, need_heap);
res = big_times(arg1, arg2, hp);
/*
@@ -779,7 +771,7 @@ erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
do_float:
f1.fd = f1.fd * f2.fd;
ERTS_FP_ERROR(p, f1.fd, goto badarith);
- hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ hp = HeapFragOnlyAlloc(p, FLOAT_SIZE_OBJECT);
res = make_float(hp);
PUT_DOUBLE(f1, hp);
return res;
@@ -905,7 +897,7 @@ erts_mixed_div(Process* p, Eterm arg1, Eterm arg2)
do_float:
f1.fd = f1.fd / f2.fd;
ERTS_FP_ERROR(p, f1.fd, goto badarith);
- hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ hp = HeapFragOnlyAlloc(p, FLOAT_SIZE_OBJECT);
PUT_DOUBLE(f1, hp);
return make_float(hp);
default:
@@ -957,7 +949,7 @@ erts_int_div(Process* p, Eterm arg1, Eterm arg2)
ires = big_size(arg2);
need = BIG_NEED_SIZE(i-ires+1) + BIG_NEED_SIZE(i);
- hp = HAlloc(p, need);
+ hp = HeapFragOnlyAlloc(p, need);
arg1 = big_div(arg1, arg2, hp);
maybe_shrink(p, hp, arg1, need);
if (is_nil(arg1)) {
@@ -1004,7 +996,7 @@ erts_int_rem(Process* p, Eterm arg1, Eterm arg2)
arg1 = SMALL_ZERO;
} else if (ires > 0) {
Uint need = BIG_NEED_SIZE(big_size(arg1));
- Eterm* hp = HAlloc(p, need);
+ Eterm* hp = HeapFragOnlyAlloc(p, need);
arg1 = big_rem(arg1, arg2, hp);
maybe_shrink(p, hp, arg1, need);
@@ -1041,7 +1033,7 @@ Eterm erts_band(Process* p, Eterm arg1, Eterm arg2)
return THE_NON_VALUE;
}
need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1);
- hp = HAlloc(p, need);
+ hp = HeapFragOnlyAlloc(p, need);
arg1 = big_band(arg1, arg2, hp);
ASSERT(is_not_nil(arg1));
maybe_shrink(p, hp, arg1, need);
@@ -1069,7 +1061,7 @@ Eterm erts_bor(Process* p, Eterm arg1, Eterm arg2)
return THE_NON_VALUE;
}
need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1);
- hp = HAlloc(p, need);
+ hp = HeapFragOnlyAlloc(p, need);
arg1 = big_bor(arg1, arg2, hp);
ASSERT(is_not_nil(arg1));
maybe_shrink(p, hp, arg1, need);
@@ -1097,7 +1089,7 @@ Eterm erts_bxor(Process* p, Eterm arg1, Eterm arg2)
return THE_NON_VALUE;
}
need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1);
- hp = HAlloc(p, need);
+ hp = HeapFragOnlyAlloc(p, need);
arg1 = big_bxor(arg1, arg2, hp);
ASSERT(is_not_nil(arg1));
maybe_shrink(p, hp, arg1, need);
@@ -1110,7 +1102,7 @@ Eterm erts_bnot(Process* p, Eterm arg)
if (is_big(arg)) {
Uint need = BIG_NEED_SIZE(big_size(arg)+1);
- Eterm* bigp = HAlloc(p, need);
+ Eterm* bigp = HeapFragOnlyAlloc(p, need);
ret = big_bnot(arg, bigp);
maybe_shrink(p, bigp, ret, need);
@@ -1125,924 +1117,6 @@ Eterm erts_bnot(Process* p, Eterm arg)
return ret;
}
-#define ERTS_NEED_GC(p, need) ((HEAP_LIMIT((p)) - HEAP_TOP((p))) <= (need))
-
-static ERTS_INLINE void
-trim_heap(Process* p, Eterm* hp, Eterm res)
-{
- if (is_immed(res)) {
- ASSERT(p->heap <= hp && hp <= p->htop);
- p->htop = hp;
- } else {
- Eterm* new_htop;
- ASSERT(is_big(res));
- new_htop = hp + bignum_header_arity(*hp) + 1;
- ASSERT(p->heap <= new_htop && new_htop <= p->htop);
- p->htop = new_htop;
- }
- ASSERT(p->heap <= p->htop && p->htop <= p->stop);
-}
-
-/*
- * The functions that follow are called from the emulator loop.
- * They are not allowed to allocate heap fragments, but must do
- * a garbage collection if there is insufficient heap space.
- */
-
-#define erts_heap_frag_shrink horrible error
-#define maybe_shrink horrible error
-
-Eterm
-erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg1;
- Eterm arg2;
- DECLARE_TMP(tmp_big1,0,p);
- DECLARE_TMP(tmp_big2,1,p);
- Eterm res;
- Eterm hdr;
- FloatDef f1, f2;
- dsize_t sz1, sz2, sz;
- int need_heap;
- Eterm* hp;
- Sint ires;
-
- arg1 = reg[live];
- arg2 = reg[live+1];
- ERTS_FP_CHECK_INIT(p);
- switch (arg1 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- ires = signed_val(arg1) + signed_val(arg2);
- if (IS_SSMALL(ires)) {
- return make_small(ires);
- } else {
- if (ERTS_NEED_GC(p, 2)) {
- erts_garbage_collect(p, 2, reg, live);
- }
- hp = p->htop;
- p->htop += 2;
- res = small_to_big(ires, hp);
- return res;
- }
- default:
- badarith:
- p->freason = BADARITH;
- return THE_NON_VALUE;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- if (arg1 == SMALL_ZERO) {
- return arg2;
- }
- arg1 = small_to_big(signed_val(arg1), tmp_big1);
- goto do_big;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- f1.fd = signed_val(arg1);
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg1);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- if (arg2 == SMALL_ZERO) {
- return arg1;
- }
- arg2 = small_to_big(signed_val(arg2), tmp_big2);
- goto do_big;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- do_big:
- sz1 = big_size(arg1);
- sz2 = big_size(arg2);
- sz = MAX(sz1, sz2)+1;
- need_heap = BIG_NEED_SIZE(sz);
- if (ERTS_NEED_GC(p, need_heap)) {
- erts_garbage_collect(p, need_heap, reg, live+2);
- if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
- arg1 = reg[live];
- }
- if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
- arg2 = reg[live+1];
- }
- }
- hp = p->htop;
- p->htop += need_heap;
- res = big_plus(arg1, arg2, hp);
- trim_heap(p, hp, res);
- if (is_nil(res)) {
- p->freason = SYSTEM_LIMIT;
- return THE_NON_VALUE;
- }
- return res;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- if (big_to_double(arg1, &f1.fd) < 0) {
- goto badarith;
- }
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- f2.fd = signed_val(arg2);
- goto do_float;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- if (big_to_double(arg2, &f2.fd) < 0) {
- goto badarith;
- }
- goto do_float;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- GET_DOUBLE(arg2, f2);
-
- do_float:
- f1.fd = f1.fd + f2.fd;
- ERTS_FP_ERROR(p, f1.fd, goto badarith);
- if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
- erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
- }
- hp = p->htop;
- p->htop += FLOAT_SIZE_OBJECT;
- res = make_float(hp);
- PUT_DOUBLE(f1, hp);
- return res;
- default:
- goto badarith;
- }
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
-}
-
-Eterm
-erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg1;
- Eterm arg2;
- DECLARE_TMP(tmp_big1,0,p);
- DECLARE_TMP(tmp_big2,1,p);
- Eterm hdr;
- Eterm res;
- FloatDef f1, f2;
- dsize_t sz1, sz2, sz;
- int need_heap;
- Eterm* hp;
- Sint ires;
-
- arg1 = reg[live];
- arg2 = reg[live+1];
- ERTS_FP_CHECK_INIT(p);
- switch (arg1 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- ires = signed_val(arg1) - signed_val(arg2);
- if (IS_SSMALL(ires)) {
- return make_small(ires);
- } else {
- if (ERTS_NEED_GC(p, 2)) {
- erts_garbage_collect(p, 2, reg, live);
- }
- hp = p->htop;
- p->htop += 2;
- res = small_to_big(ires, hp);
- return res;
- }
- default:
- badarith:
- p->freason = BADARITH;
- return THE_NON_VALUE;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- arg1 = small_to_big(signed_val(arg1), tmp_big1);
- goto do_big;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- f1.fd = signed_val(arg1);
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg1);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- if (arg2 == SMALL_ZERO) {
- return arg1;
- }
- arg2 = small_to_big(signed_val(arg2), tmp_big2);
-
- do_big:
- sz1 = big_size(arg1);
- sz2 = big_size(arg2);
- sz = MAX(sz1, sz2)+1;
- need_heap = BIG_NEED_SIZE(sz);
- if (ERTS_NEED_GC(p, need_heap)) {
- erts_garbage_collect(p, need_heap, reg, live+2);
- if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
- arg1 = reg[live];
- }
- if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
- arg2 = reg[live+1];
- }
- }
- hp = p->htop;
- p->htop += need_heap;
- res = big_minus(arg1, arg2, hp);
- trim_heap(p, hp, res);
- if (is_nil(res)) {
- p->freason = SYSTEM_LIMIT;
- return THE_NON_VALUE;
- }
- return res;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- goto do_big;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- if (big_to_double(arg1, &f1.fd) < 0) {
- goto badarith;
- }
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- f2.fd = signed_val(arg2);
- goto do_float;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- if (big_to_double(arg2, &f2.fd) < 0) {
- goto badarith;
- }
- goto do_float;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- GET_DOUBLE(arg2, f2);
-
- do_float:
- f1.fd = f1.fd - f2.fd;
- ERTS_FP_ERROR(p, f1.fd, goto badarith);
- if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
- erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
- }
- hp = p->htop;
- p->htop += FLOAT_SIZE_OBJECT;
- res = make_float(hp);
- PUT_DOUBLE(f1, hp);
- return res;
- default:
- goto badarith;
- }
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
-}
-
-Eterm
-erts_gc_mixed_times(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg1;
- Eterm arg2;
- DECLARE_TMP(tmp_big1,0,p);
- DECLARE_TMP(tmp_big2,1,p);
- Eterm hdr;
- Eterm res;
- FloatDef f1, f2;
- dsize_t sz1, sz2, sz;
- int need_heap;
- Eterm* hp;
-
- arg1 = reg[live];
- arg2 = reg[live+1];
- ERTS_FP_CHECK_INIT(p);
- switch (arg1 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- if ((arg1 == SMALL_ZERO) || (arg2 == SMALL_ZERO)) {
- return(SMALL_ZERO);
- } else if (arg1 == SMALL_ONE) {
- return(arg2);
- } else if (arg2 == SMALL_ONE) {
- return(arg1);
- } else {
- DeclareTmpHeap(big_res,3,p);
- UseTmpHeap(3,p);
-
- /*
- * The following code is optimized for the case that
- * result is small (which should be the most common case
- * in practice).
- */
- res = small_times(signed_val(arg1), signed_val(arg2),
- big_res);
- if (is_small(res)) {
- UnUseTmpHeap(3,p);
- return res;
- } else {
- /*
- * The result is a a big number.
- * Allocate a heap fragment and copy the result.
- * Be careful to allocate exactly what we need
- * to not leave any holes.
- */
- Uint arity;
- Uint need;
-
- ASSERT(is_big(res));
- hdr = big_res[0];
- arity = bignum_header_arity(hdr);
- ASSERT(arity == 1 || arity == 2);
- need = arity + 1;
- if (ERTS_NEED_GC(p, need)) {
- erts_garbage_collect(p, need, reg, live);
- }
- hp = p->htop;
- p->htop += need;
- res = make_big(hp);
- *hp++ = hdr;
- *hp++ = big_res[1];
- if (arity > 1) {
- *hp = big_res[2];
- }
- UnUseTmpHeap(3,p);
- return res;
- }
- }
- default:
- badarith:
- p->freason = BADARITH;
- return THE_NON_VALUE;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- if (arg1 == SMALL_ZERO)
- return(SMALL_ZERO);
- if (arg1 == SMALL_ONE)
- return(arg2);
- arg1 = small_to_big(signed_val(arg1), tmp_big1);
- sz = 2 + big_size(arg2);
- goto do_big;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- f1.fd = signed_val(arg1);
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg1);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- if (arg2 == SMALL_ZERO)
- return(SMALL_ZERO);
- if (arg2 == SMALL_ONE)
- return(arg1);
- arg2 = small_to_big(signed_val(arg2), tmp_big2);
- sz = 2 + big_size(arg1);
- goto do_big;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- sz1 = big_size(arg1);
- sz2 = big_size(arg2);
- sz = sz1 + sz2;
-
- do_big:
- need_heap = BIG_NEED_SIZE(sz);
- if (ERTS_NEED_GC(p, need_heap)) {
- erts_garbage_collect(p, need_heap, reg, live+2);
- if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
- arg1 = reg[live];
- }
- if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
- arg2 = reg[live+1];
- }
- }
- hp = p->htop;
- p->htop += need_heap;
- res = big_times(arg1, arg2, hp);
- trim_heap(p, hp, res);
-
- /*
- * Note that the result must be big in this case, since
- * at least one operand was big to begin with, and
- * the absolute value of the other is > 1.
- */
-
- if (is_nil(res)) {
- p->freason = SYSTEM_LIMIT;
- return THE_NON_VALUE;
- }
- return res;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- if (big_to_double(arg1, &f1.fd) < 0) {
- goto badarith;
- }
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- f2.fd = signed_val(arg2);
- goto do_float;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- if (big_to_double(arg2, &f2.fd) < 0) {
- goto badarith;
- }
- goto do_float;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- GET_DOUBLE(arg2, f2);
-
- do_float:
- f1.fd = f1.fd * f2.fd;
- ERTS_FP_ERROR(p, f1.fd, goto badarith);
- if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
- erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
- }
- hp = p->htop;
- p->htop += FLOAT_SIZE_OBJECT;
- res = make_float(hp);
- PUT_DOUBLE(f1, hp);
- return res;
- default:
- goto badarith;
- }
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
-}
-
-Eterm
-erts_gc_mixed_div(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg1;
- Eterm arg2;
- FloatDef f1, f2;
- Eterm* hp;
- Eterm hdr;
-
- arg1 = reg[live];
- arg2 = reg[live+1];
- ERTS_FP_CHECK_INIT(p);
- switch (arg1 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- f1.fd = signed_val(arg1);
- f2.fd = signed_val(arg2);
- goto do_float;
- default:
- badarith:
- p->freason = BADARITH;
- return THE_NON_VALUE;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- f1.fd = signed_val(arg1);
- if (big_to_double(arg2, &f2.fd) < 0) {
- goto badarith;
- }
- goto do_float;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- f1.fd = signed_val(arg1);
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg1);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- if (big_to_double(arg1, &f1.fd) < 0) {
- goto badarith;
- }
- f2.fd = signed_val(arg2);
- goto do_float;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- if (big_to_double(arg1, &f1.fd) < 0 ||
- big_to_double(arg2, &f2.fd) < 0) {
- goto badarith;
- }
- goto do_float;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- if (big_to_double(arg1, &f1.fd) < 0) {
- goto badarith;
- }
- GET_DOUBLE(arg2, f2);
- goto do_float;
- default:
- goto badarith;
- }
- }
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- switch (arg2 & _TAG_PRIMARY_MASK) {
- case TAG_PRIMARY_IMMED1:
- switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- f2.fd = signed_val(arg2);
- goto do_float;
- default:
- goto badarith;
- }
- case TAG_PRIMARY_BOXED:
- hdr = *boxed_val(arg2);
- switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
- case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
- case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- if (big_to_double(arg2, &f2.fd) < 0) {
- goto badarith;
- }
- goto do_float;
- case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- GET_DOUBLE(arg1, f1);
- GET_DOUBLE(arg2, f2);
-
- do_float:
- f1.fd = f1.fd / f2.fd;
- ERTS_FP_ERROR(p, f1.fd, goto badarith);
- if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
- erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
- }
- hp = p->htop;
- p->htop += FLOAT_SIZE_OBJECT;
- PUT_DOUBLE(f1, hp);
- return make_float(hp);
- default:
- goto badarith;
- }
- default:
- goto badarith;
- }
- }
- default:
- goto badarith;
- }
-}
-
-Eterm
-erts_gc_int_div(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg1;
- Eterm arg2;
- DECLARE_TMP(tmp_big1,0,p);
- DECLARE_TMP(tmp_big2,1,p);
- int ires;
-
- arg1 = reg[live];
- arg2 = reg[live+1];
- switch (NUMBER_CODE(arg1, arg2)) {
- case SMALL_SMALL:
- /* This case occurs if the most negative fixnum is divided by -1. */
- ASSERT(arg2 == make_small(-1));
- arg1 = small_to_big(signed_val(arg1), tmp_big1);
- /*FALLTHROUGH*/
- case BIG_SMALL:
- arg2 = small_to_big(signed_val(arg2), tmp_big2);
- goto L_big_div;
- case SMALL_BIG:
- if (arg1 != make_small(MIN_SMALL)) {
- return SMALL_ZERO;
- }
- arg1 = small_to_big(signed_val(arg1), tmp_big1);
- /*FALLTHROUGH*/
- case BIG_BIG:
- L_big_div:
- ires = big_ucomp(arg1, arg2);
- if (ires < 0) {
- arg1 = SMALL_ZERO;
- } else if (ires == 0) {
- arg1 = (big_sign(arg1) == big_sign(arg2)) ?
- SMALL_ONE : SMALL_MINUS_ONE;
- } else {
- Eterm* hp;
- int i = big_size(arg1);
- Uint need;
-
- ires = big_size(arg2);
- need = BIG_NEED_SIZE(i-ires+1) + BIG_NEED_SIZE(i);
- if (ERTS_NEED_GC(p, need)) {
- erts_garbage_collect(p, need, reg, live+2);
- if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
- arg1 = reg[live];
- }
- if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
- arg2 = reg[live+1];
- }
- }
- hp = p->htop;
- p->htop += need;
- arg1 = big_div(arg1, arg2, hp);
- trim_heap(p, hp, arg1);
- if (is_nil(arg1)) {
- p->freason = SYSTEM_LIMIT;
- return THE_NON_VALUE;
- }
- }
- return arg1;
- default:
- p->freason = BADARITH;
- return THE_NON_VALUE;
- }
-}
-
-Eterm
-erts_gc_int_rem(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg1;
- Eterm arg2;
- DECLARE_TMP(tmp_big1,0,p);
- DECLARE_TMP(tmp_big2,1,p);
- int ires;
-
- arg1 = reg[live];
- arg2 = reg[live+1];
- switch (NUMBER_CODE(arg1, arg2)) {
- case BIG_SMALL:
- arg2 = small_to_big(signed_val(arg2), tmp_big2);
- goto L_big_rem;
- case SMALL_BIG:
- if (arg1 != make_small(MIN_SMALL)) {
- return arg1;
- } else {
- Eterm tmp;
- tmp = small_to_big(signed_val(arg1), tmp_big1);
- if ((ires = big_ucomp(tmp, arg2)) == 0) {
- return SMALL_ZERO;
- } else {
- ASSERT(ires < 0);
- return arg1;
- }
- }
- /* All paths returned */
- case BIG_BIG:
- L_big_rem:
- ires = big_ucomp(arg1, arg2);
- if (ires == 0) {
- arg1 = SMALL_ZERO;
- } else if (ires > 0) {
- Eterm* hp;
- Uint need = BIG_NEED_SIZE(big_size(arg1));
-
- if (ERTS_NEED_GC(p, need)) {
- erts_garbage_collect(p, need, reg, live+2);
- if (ARG_IS_NOT_TMP(arg1,tmp_big1)) {
- arg1 = reg[live];
- }
- if (ARG_IS_NOT_TMP(arg2,tmp_big2)) {
- arg2 = reg[live+1];
- }
- }
- hp = p->htop;
- p->htop += need;
- arg1 = big_rem(arg1, arg2, hp);
- trim_heap(p, hp, arg1);
- if (is_nil(arg1)) {
- p->freason = SYSTEM_LIMIT;
- return THE_NON_VALUE;
- }
- }
- return arg1;
- default:
- p->freason = BADARITH;
- return THE_NON_VALUE;
- }
-}
-
-#define DEFINE_GC_LOGIC_FUNC(func) \
-Eterm erts_gc_##func(Process* p, Eterm* reg, Uint live) \
-{ \
- Eterm arg1; \
- Eterm arg2; \
- DECLARE_TMP(tmp_big1,0,p); \
- DECLARE_TMP(tmp_big2,1,p); \
- Eterm* hp; \
- int need; \
- \
- arg1 = reg[live]; \
- arg2 = reg[live+1]; \
- switch (NUMBER_CODE(arg1, arg2)) { \
- case SMALL_BIG: \
- arg1 = small_to_big(signed_val(arg1), tmp_big1); \
- need = BIG_NEED_SIZE(big_size(arg2) + 1); \
- if (ERTS_NEED_GC(p, need)) { \
- erts_garbage_collect(p, need, reg, live+2); \
- arg2 = reg[live+1]; \
- } \
- break; \
- case BIG_SMALL: \
- arg2 = small_to_big(signed_val(arg2), tmp_big2); \
- need = BIG_NEED_SIZE(big_size(arg1) + 1); \
- if (ERTS_NEED_GC(p, need)) { \
- erts_garbage_collect(p, need, reg, live+2); \
- arg1 = reg[live]; \
- } \
- break; \
- case BIG_BIG: \
- need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1); \
- if (ERTS_NEED_GC(p, need)) { \
- erts_garbage_collect(p, need, reg, live+2); \
- arg1 = reg[live]; \
- arg2 = reg[live+1]; \
- } \
- break; \
- default: \
- p->freason = BADARITH; \
- return THE_NON_VALUE; \
- } \
- hp = p->htop; \
- p->htop += need; \
- arg1 = big_##func(arg1, arg2, hp); \
- trim_heap(p, hp, arg1); \
- return arg1; \
-}
-
-DEFINE_GC_LOGIC_FUNC(band)
-DEFINE_GC_LOGIC_FUNC(bor)
-DEFINE_GC_LOGIC_FUNC(bxor)
-
-Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live)
-{
- Eterm result;
- Eterm arg;
- Uint need;
- Eterm* bigp;
-
- arg = reg[live];
- if (is_not_big(arg)) {
- p->freason = BADARITH;
- return NIL;
- } else {
- need = BIG_NEED_SIZE(big_size(arg)+1);
- if (ERTS_NEED_GC(p, need)) {
- erts_garbage_collect(p, need, reg, live+1);
- arg = reg[live];
- }
- bigp = p->htop;
- p->htop += need;
- result = big_bnot(arg, bigp);
- trim_heap(p, bigp, result);
- if (is_nil(result)) {
- p->freason = SYSTEM_LIMIT;
- return NIL;
- }
- }
- return result;
-}
-
/* Needed to remove compiler optimization */
double erts_get_positive_zero_float() {
return 0.0f;
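
The SMALL_SMALL case in erts_gc_int_div above only fires for the most negative fixnum divided by -1: it is the sole small-by-small division whose quotient no longer fits a small. A minimal standalone sketch, assuming a 64-bit word with 4 tag bits (the real constants live in erl_term.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative layout assumption: 64-bit words, 4 tag bits. */
    #define SMALL_BITS 60
    #define MAX_SMALL ((INT64_C(1) << (SMALL_BITS - 1)) - 1)
    #define MIN_SMALL (-(INT64_C(1) << (SMALL_BITS - 1)))

    int main(void)
    {
        int64_t quotient = -MIN_SMALL;      /* MIN_SMALL div -1 */
        /* The magnitude exceeds MAX_SMALL, so the quotient must be
         * boxed as a bignum; every other small/small quotient fits. */
        printf("fits as small? %s\n", quotient <= MAX_SMALL ? "yes" : "no");
        return 0;
    }
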
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 3f981ca2bc..0e7be99801 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -209,6 +209,8 @@ erts_bfalc_start(BFAllctr_t *bfallctr,
allctr->add_mbc = NULL;
allctr->remove_mbc = NULL;
allctr->largest_fblk_in_mbc = NULL;
+ allctr->first_fblk_in_mbc = NULL;
+ allctr->next_fblk_in_mbc = NULL;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index ae1bf6e652..4d6d31cd76 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -208,8 +208,8 @@ typedef struct _ac_trie {
typedef struct _bm_data {
byte *x;
Sint len;
+ Sint *badshift;
Sint *goodshift;
- Sint badshift[ALPHABET_SIZE];
} BMData;
typedef struct _ac_find_all_state {
@@ -319,16 +319,104 @@ static void dump_ac_node(ACNode *node, int indent, int ch);
* The needed size of binary data for a search structure - given the
* accumulated string lengths.
*/
-#define BM_SIZE(StrLen) /* StrLen: length of searchstring */ \
-((MYALIGN(sizeof(Sint) * (StrLen))) + /* goodshift array */ \
- MYALIGN(StrLen) + /* searchstring saved */ \
- (MYALIGN(sizeof(BMData)))) /* Structure */
+#define BM_SIZE_SINGLE() /* Single byte search string */ \
+(MYALIGN(1) + /* searchstring saved */ \
+ (MYALIGN(sizeof(BMData)))) /* Structure */
+
+#define BM_SIZE_MULTI(StrLen) /* StrLen: length of searchstring */ \
+((MYALIGN(sizeof(Uint) * (StrLen))) + /* goodshift array */ \
+ (MYALIGN(sizeof(Uint) * ALPHABET_SIZE)) + /* badshift array */ \
+ MYALIGN(StrLen) + /* searchstring saved */ \
+ (MYALIGN(sizeof(BMData)))) /* Structure */
#define AC_SIZE(StrLens) /* StrLens: sum of all searchstring lengths */ \
((MYALIGN(sizeof(ACNode)) * \
((StrLens)+1)) + /* The actual nodes (including rootnode) */ \
MYALIGN(sizeof(ACTrie))) /* Structure */
+/*
+ * Boyer Moore - most obviously implemented more or less exactly as
+ * Christian Charras and Thierry Lecroq describe it in "Handbook of
+ * Exact String-Matching Algorithms"
+ * http://www-igm.univ-mlv.fr/~lecroq/string/
+ */
+
+/*
+ * Call this to compute badshifts array
+ */
+static void compute_badshifts(BMData *bmd)
+{
+ Sint i;
+ Sint m = bmd->len;
+
+ for (i = 0; i < ALPHABET_SIZE; ++i) {
+ bmd->badshift[i] = m;
+ }
+ for (i = 0; i < m - 1; ++i) {
+ bmd->badshift[bmd->x[i]] = m - i - 1;
+ }
+}
+
+/* Helper for "compute_goodshifts" */
+static void compute_suffixes(byte *x, Sint m, Sint *suffixes)
+{
+ int f,g,i;
+
+ suffixes[m - 1] = m;
+
+ f = 0; /* To avoid use before set warning */
+
+ g = m - 1;
+
+ for (i = m - 2; i >= 0; --i) {
+ if (i > g && suffixes[i + m - 1 - f] < i - g) {
+ suffixes[i] = suffixes[i + m - 1 - f];
+ } else {
+ if (i < g) {
+ g = i;
+ }
+ f = i;
+ while ( g >= 0 && x[g] == x[g + m - 1 - f] ) {
+ --g;
+ }
+ suffixes[i] = f - g;
+ }
+ }
+}
+
+/*
+ * Call this to compute goodshift array
+ */
+static void compute_goodshifts(BMData *bmd)
+{
+ Sint m = bmd->len;
+ byte *x = bmd->x;
+ Sint i, j;
+ Sint *suffixes = erts_alloc(ERTS_ALC_T_TMP, m * sizeof(Sint));
+
+ compute_suffixes(x, m, suffixes);
+
+ for (i = 0; i < m; ++i) {
+ bmd->goodshift[i] = m;
+ }
+
+ j = 0;
+
+ for (i = m - 1; i >= -1; --i) {
+ if (i == -1 || suffixes[i] == i + 1) {
+ while (j < m - 1 - i) {
+ if (bmd->goodshift[j] == m) {
+ bmd->goodshift[j] = m - 1 - i;
+ }
+ ++j;
+ }
+ }
+ }
+ for (i = 0; i <= m - 2; ++i) {
+ bmd->goodshift[m - 1 - suffixes[i]] = m - 1 - i;
+ }
+ erts_free(ERTS_ALC_T_TMP, suffixes);
+}
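+
+/*
+ * The two helpers above follow the classic Boyer-Moore construction:
+ * badshift maps each byte to how far the window may slide when that
+ * byte mismatches against the last needle position, and goodshift
+ * encodes how far a matched suffix lets the window jump. A standalone
+ * sketch of the bad-character rule with an illustrative needle:
+ *
+ *   #include <stdio.h>
+ *   #include <string.h>
+ *
+ *   int main(void)
+ *   {
+ *       const unsigned char x[] = "ANPANMAN";
+ *       long m = (long)strlen((const char *)x);
+ *       long bad[256];
+ *       int i;
+ *
+ *       for (i = 0; i < 256; i++)
+ *           bad[i] = m;              / * absent byte: skip whole needle * /
+ *       for (i = 0; i < m - 1; i++)
+ *           bad[x[i]] = m - i - 1;   / * rightmost occurrence wins * /
+ *
+ *       / * Prints: bad['A']=1 bad['N']=3 bad['P']=5 bad['X']=8 * /
+ *       printf("bad['A']=%ld bad['N']=%ld bad['P']=%ld bad['X']=%ld\n",
+ *              bad['A'], bad['N'], bad['P'], bad['X']);
+ *       return 0;
+ *   }
+ */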
/*
* Callback for the magic binary
@@ -377,20 +465,37 @@ static ACTrie *create_acdata(MyAllocator *my, Uint len,
/*
* The same initialization of allocator and basic data for Boyer-Moore.
+ * For a single-byte needle we don't use goodshift and badshift, only memchr.
*/
static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len,
Binary **the_bin /* out */)
{
- Uint datasize = BM_SIZE(len);
+ Uint datasize;
BMData *bmd;
- Binary *mb = erts_create_magic_binary(datasize,cleanup_my_data_bm);
- byte *data = ERTS_MAGIC_BIN_DATA(mb);
+ Binary *mb;
+ byte *data;
+
+ if(len > 1) {
+ datasize = BM_SIZE_MULTI(len);
+ } else {
+ datasize = BM_SIZE_SINGLE();
+ }
+
+ mb = erts_create_magic_binary(datasize,cleanup_my_data_bm);
+ data = ERTS_MAGIC_BIN_DATA(mb);
init_my_allocator(my, datasize, data);
bmd = my_alloc(my, sizeof(BMData));
bmd->x = my_alloc(my,len);
sys_memcpy(bmd->x,x,len);
bmd->len = len;
- bmd->goodshift = my_alloc(my,sizeof(Uint) * len);
+
+ if(len > 1) {
+ bmd->goodshift = my_alloc(my, sizeof(Uint) * len);
+ bmd->badshift = my_alloc(my, sizeof(Uint) * ALPHABET_SIZE);
+ compute_badshifts(bmd);
+ compute_goodshifts(bmd);
+ }
+
*the_bin = mb;
return bmd;
}
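
For a one-byte needle, create_bmdata above allocates neither table; matching then degenerates to scanning for a single byte, which memchr() already does optimally. A minimal sketch of that fast path (helper name is hypothetical):

    #include <string.h>

    /* Hypothetical helper: find a one-byte needle without shift tables. */
    static const unsigned char *
    find_single(const unsigned char *haystack, size_t len, unsigned char needle)
    {
        return memchr(haystack, needle, len);   /* NULL when absent */
    }
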
@@ -711,91 +816,8 @@ static BFReturn ac_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta
return (m == 0) ? BF_NOT_FOUND : BF_OK;
}
-/*
- * Boyer Moore - most obviously implemented more or less exactly as
- * Christian Charras and Thierry Lecroq describe it in "Handbook of
- * Exact String-Matching Algorithms"
- * http://www-igm.univ-mlv.fr/~lecroq/string/
- */
-
-/*
- * Call this to compute badshifts array
- */
-static void compute_badshifts(BMData *bmd)
-{
- Sint i;
- Sint m = bmd->len;
-
- for (i = 0; i < ALPHABET_SIZE; ++i) {
- bmd->badshift[i] = m;
- }
- for (i = 0; i < m - 1; ++i) {
- bmd->badshift[bmd->x[i]] = m - i - 1;
- }
-}
-
-/* Helper for "compute_goodshifts" */
-static void compute_suffixes(byte *x, Sint m, Sint *suffixes)
-{
- int f,g,i;
-
- suffixes[m - 1] = m;
-
- f = 0; /* To avoid use before set warning */
-
- g = m - 1;
-
- for (i = m - 2; i >= 0; --i) {
- if (i > g && suffixes[i + m - 1 - f] < i - g) {
- suffixes[i] = suffixes[i + m - 1 - f];
- } else {
- if (i < g) {
- g = i;
- }
- f = i;
- while ( g >= 0 && x[g] == x[g + m - 1 - f] ) {
- --g;
- }
- suffixes[i] = f - g;
- }
- }
-}
-
-/*
- * Call this to compute goodshift array
- */
-static void compute_goodshifts(BMData *bmd)
-{
- Sint m = bmd->len;
- byte *x = bmd->x;
- Sint i, j;
- Sint *suffixes = erts_alloc(ERTS_ALC_T_TMP, m * sizeof(Sint));
-
- compute_suffixes(x, m, suffixes);
-
- for (i = 0; i < m; ++i) {
- bmd->goodshift[i] = m;
- }
-
- j = 0;
-
- for (i = m - 1; i >= -1; --i) {
- if (i == -1 || suffixes[i] == i + 1) {
- while (j < m - 1 - i) {
- if (bmd->goodshift[j] == m) {
- bmd->goodshift[j] = m - 1 - i;
- }
- ++j;
- }
- }
- }
- for (i = 0; i <= m - 2; ++i) {
- bmd->goodshift[m - 1 - suffixes[i]] = m - 1 - i;
- }
- erts_free(ERTS_ALC_T_TMP, suffixes);
-}
-
#define BM_LOOP_FACTOR 10 /* Should we have a higher value? */
+#define MC_LOOP_FACTOR 8
static void bm_init_find_first_match(BinaryFindContext *ctx)
{
@@ -819,13 +841,38 @@ static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack)
Sint i;
Sint j = state->pos;
register Uint reds = *reductions;
+ byte *pos_pointer;
+ Sint needle_last = blen - 1;
+ Sint mem_read = len - needle_last - j;
+
+ if (mem_read <= 0) {
+ return BF_NOT_FOUND;
+ }
+ mem_read = MIN(mem_read, reds * MC_LOOP_FACTOR);
+ ASSERT(mem_read > 0);
- while (j <= len - blen) {
+ pos_pointer = memchr(&haystack[j + needle_last], needle[needle_last], mem_read);
+ if (pos_pointer == NULL) {
+ reds -= mem_read / MC_LOOP_FACTOR;
+ j += mem_read;
+ } else {
+ reds -= (pos_pointer - &haystack[j]) / MC_LOOP_FACTOR;
+ j = pos_pointer - haystack - needle_last;
+ }
+
+ /* Ensure we have at least one reduction before entering the loop */
+ ++reds;
+
+ for(;;) {
+ if (j > len - blen) {
+ *reductions = reds;
+ return BF_NOT_FOUND;
+ }
if (--reds == 0) {
state->pos = j;
return BF_RESTART;
}
- for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
+ for (i = needle_last; i >= 0 && needle[i] == haystack[i + j]; --i)
;
if (i < 0) { /* found */
*reductions = reds;
@@ -835,8 +882,6 @@ static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack)
}
j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);
}
- *reductions = reds;
- return BF_NOT_FOUND;
}
static void bm_init_find_all(BinaryFindContext *ctx)
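
The prologue added above charges one reduction per MC_LOOP_FACTOR bytes scanned: mem_read is clamped to what the remaining budget allows before memchr() runs, and the trailing ++reds leaves at least one reduction so the loop cannot trap without having advanced. A minimal sketch of the budgeted scan, with hypothetical names:

    #include <stddef.h>
    #include <string.h>

    #define MC_LOOP_FACTOR 8   /* bytes scanned per reduction, as above */

    static const unsigned char *
    budgeted_memchr(const unsigned char *p, size_t avail,
                    unsigned char c, unsigned long *reds)
    {
        size_t chunk = avail;
        const unsigned char *hit;

        if (chunk > *reds * MC_LOOP_FACTOR)
            chunk = *reds * MC_LOOP_FACTOR;              /* clamp to budget */
        hit = memchr(p, c, chunk);
        *reds -= (hit ? (size_t)(hit - p) : chunk) / MC_LOOP_FACTOR;
        return hit;              /* NULL: no hit within budget or data */
    }
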
@@ -875,14 +920,38 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta
Sint *gs = bmd->goodshift;
Sint *bs = bmd->badshift;
byte *needle = bmd->x;
- Sint i;
+ Sint i = -1; /* Use memchr on start and on every match */
Sint j = state->pos;
Uint m = state->m;
Uint allocated = state->allocated;
FindallData *out = state->out;
register Uint reds = *reductions;
+ byte *pos_pointer;
+ Sint needle_last = blen - 1;
+ Sint mem_read;
- while (j <= len - blen) {
+ for(;;) {
+ if (i < 0) {
+ mem_read = len - needle_last - j;
+ if(mem_read <= 0) {
+ goto done;
+ }
+ mem_read = MIN(mem_read, reds * MC_LOOP_FACTOR);
+ ASSERT(mem_read > 0);
+ pos_pointer = memchr(&haystack[j + needle_last], needle[needle_last], mem_read);
+ if (pos_pointer == NULL) {
+ reds -= mem_read / MC_LOOP_FACTOR;
+ j += mem_read;
+ } else {
+ reds -= (pos_pointer - &haystack[j]) / MC_LOOP_FACTOR;
+ j = pos_pointer - haystack - needle_last;
+ }
+ /* Ensure we have at least one reduction when resuming the loop */
+ ++reds;
+ }
+ if (j > len - blen) {
+ goto done;
+ }
if (--reds == 0) {
state->pos = j;
state->m = m;
@@ -890,7 +959,7 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta
state->out = out;
return BF_RESTART;
}
- for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
+ for (i = needle_last; i >= 0 && needle[i] == haystack[i + j]; --i)
;
if (i < 0) { /* found */
if (m >= allocated) {
@@ -912,6 +981,7 @@ static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haysta
j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);
}
}
+ done:
state->m = m;
state->out = out;
*reductions = reds;
@@ -931,6 +1001,7 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)
Eterm t, b, comp_term = NIL;
Uint characters;
Uint words;
+ Uint size;
characters = 0;
words = 0;
@@ -946,11 +1017,12 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)
if (binary_bitsize(b) != 0) {
goto badarg;
}
- if (binary_size(b) == 0) {
+ size = binary_size(b);
+ if (size == 0) {
goto badarg;
}
++words;
- characters += binary_size(b);
+ characters += size;
}
if (is_not_nil(t)) {
goto badarg;
@@ -979,16 +1051,13 @@ static int do_binary_match_compile(Eterm argument, Eterm *tag, Binary **binp)
Uint bitoffs, bitsize;
byte *temp_alloc = NULL;
MyAllocator my;
- BMData *bmd;
Binary *bin;
ERTS_GET_BINARY_BYTES(comp_term, bytes, bitoffs, bitsize);
if (bitoffs != 0) {
bytes = erts_get_aligned_binary_bytes(comp_term, &temp_alloc);
}
- bmd = create_bmdata(&my, bytes, characters, &bin);
- compute_badshifts(bmd);
- compute_goodshifts(bmd);
+ create_bmdata(&my, bytes, characters, &bin);
erts_free_aligned_binary_bytes(temp_alloc);
CHECK_ALLOCATOR(my);
*tag = am_bm;
@@ -1901,9 +1970,7 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
-
-
- hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+ hp = HeapFragOnlyAlloc(p, ERL_SUB_BIN_SIZE);
ERTS_GET_REAL_BIN(binary, orig, offset, bit_offset, bit_size);
sb = (ErlSubBin *) hp;
@@ -1921,100 +1988,6 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
BIF_ERROR(p, BADARG);
}
-#define ERTS_NEED_GC(p, need) ((HEAP_LIMIT((p)) - HEAP_TOP((p))) <= (need))
-
-BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple)
-{
- Uint pos;
- Sint len;
- size_t orig_size;
- Eterm orig;
- Uint offset;
- Uint bit_offset;
- Uint bit_size;
- Eterm* hp;
- ErlSubBin* sb;
- Eterm binary;
- Eterm *tp;
- Eterm epos, elen;
- int extra_args;
-
-
- if (range_is_tuple) {
- Eterm tpl = reg[live];
- extra_args = 1;
- if (is_not_tuple(tpl)) {
- goto badarg;
- }
- tp = tuple_val(tpl);
- if (arityval(*tp) != 2) {
- goto badarg;
- }
-
- epos = tp[1];
- elen = tp[2];
- } else {
- extra_args = 2;
- epos = reg[live-1];
- elen = reg[live];
- }
- binary = reg[live-extra_args];
-
- if (is_not_binary(binary)) {
- goto badarg;
- }
- if (!term_to_Uint(epos, &pos)) {
- goto badarg;
- }
- if (!term_to_Sint(elen, &len)) {
- goto badarg;
- }
- if (len < 0) {
- Uint lentmp = -(Uint)len;
- /* overflow */
- if ((Sint)lentmp < 0) {
- goto badarg;
- }
- len = lentmp;
- if (len > pos) {
- goto badarg;
- }
- pos -= len;
- }
- /* overflow */
- if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) {
- goto badarg;
- }
- if ((orig_size = binary_size(binary)) < pos ||
- orig_size < (pos + len)) {
- goto badarg;
- }
-
- if (ERTS_NEED_GC(p, ERL_SUB_BIN_SIZE)) {
- erts_garbage_collect(p, ERL_SUB_BIN_SIZE, reg, live+1-extra_args); /* I don't need the tuple
- or indices any more */
- binary = reg[live-extra_args];
- }
-
- hp = p->htop;
- p->htop += ERL_SUB_BIN_SIZE;
-
- ERTS_GET_REAL_BIN(binary, orig, offset, bit_offset, bit_size);
-
- sb = (ErlSubBin *) hp;
- sb->thing_word = HEADER_SUB_BIN;
- sb->size = len;
- sb->offs = offset + pos;
- sb->orig = orig;
- sb->bitoffs = bit_offset;
- sb->bitsize = 0;
- sb->is_writable = 0;
-
- BIF_RET(make_binary(sb));
-
- badarg:
- BIF_ERROR(p, BADARG);
-}
/*************************************************************
* The actual guard BIFs are in erl_bif_guard.c
* but the implementation of both the non-gc and the gc
@@ -3008,17 +2981,19 @@ static void dump_bm_data(BMData *bm)
}
}
erts_printf(">>\n");
- erts_printf("GoodShift array:\n");
- for (i = 0; i < bm->len; ++i) {
- erts_printf("GoodShift[%d]: %ld\n", i, bm->goodshift[i]);
- }
- erts_printf("BadShift array:\n");
- j = 0;
- for (i = 0; i < ALPHABET_SIZE; i += j) {
- for (j = 0; i + j < ALPHABET_SIZE && j < 6; ++j) {
- erts_printf("BS[%03d]:%02ld, ", i+j, bm->badshift[i+j]);
+ if(bm->len > 1) {
+ erts_printf("GoodShift array:\n");
+ for (i = 0; i < bm->len; ++i) {
+ erts_printf("GoodShift[%d]: %ld\n", i, bm->goodshift[i]);
+ }
+ erts_printf("BadShift array:\n");
+ j = 0;
+ for (i = 0; i < ALPHABET_SIZE; i += j) {
+ for (j = 0; i + j < ALPHABET_SIZE && j < 6; ++j) {
+ erts_printf("BS[%03d]:%02ld, ", i+j, bm->badshift[i+j]);
+ }
+ erts_printf("\n");
}
- erts_printf("\n");
}
}
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 4cda0948a0..639aee29dc 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -829,7 +829,7 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1)
"cannot be loaded/unloaded";
break;
case am_permanent:
- errstring = "DDLL driver is permanent an can not be unloaded/loaded";
+ errstring = "DDLL driver is permanent and cannot be unloaded/loaded";
break;
case am_not_loaded:
errstring = "DDLL driver is not loaded";
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index 8a5c6ada6c..09757e473b 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -19,7 +19,12 @@
*/
/*
- * Numeric guard BIFs.
+ * This file implements the former GC BIFs. They used to do a GC when
+ * they needed heap space. Because of changes to the implementation of
+ * literals, those BIFs are now allowed to allocate heap fragments
+ * (using HeapFragOnlyAlloc()). Note that they must NOT call HAlloc(),
+ * because the caller does not do any SWAPIN / SWAPOUT (that is,
+ * HEAP_TOP(p) and HEAP_LIMIT(p) contain stale values).
*/
#ifdef HAVE_CONFIG_H
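
Heap fragments side-step the stale HEAP_TOP/HEAP_LIMIT problem described in the note above: a fragment lives off the process heap, linked from the process, and is merged in by a later garbage collection. A conceptual sketch (not the ERTS API) of such a fragment allocator:

    #include <stdlib.h>

    typedef struct frag {
        struct frag *next;
        size_t size;
        unsigned long mem[];     /* words handed out to the caller */
    } Frag;

    /* Allocate `need` words in a fresh fragment; the process heap
     * pointers are never read or written, so stale values are harmless. */
    static unsigned long *frag_alloc(Frag **head, size_t need)
    {
        Frag *f = malloc(sizeof(Frag) + need * sizeof(unsigned long));
        if (f == NULL)
            return NULL;
        f->next = *head;
        f->size = need;
        *head = f;               /* a later GC merges the chain into the heap */
        return f->mem;
    }
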
@@ -36,14 +41,16 @@
#include "erl_binary.h"
#include "erl_map.h"
-static Eterm gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live);
-
static Eterm double_to_integer(Process* p, double x);
+static BIF_RETTYPE erlang_length_trap(BIF_ALIST_3);
+static Export erlang_length_export;
-/*
- * Guard BIFs called using apply/3 and guard BIFs that never build
- * anything on the heap.
- */
+void erts_init_bif_guard(void)
+{
+ erts_init_trap_export(&erlang_length_export,
+ am_erlang, am_length, 3,
+ &erlang_length_trap);
+}
BIF_RETTYPE abs_1(BIF_ALIST_1)
{
@@ -56,7 +63,7 @@ BIF_RETTYPE abs_1(BIF_ALIST_1)
i0 = signed_val(BIF_ARG_1);
i = ERTS_SMALL_ABS(i0);
if (i0 == MIN_SMALL) {
- hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ hp = HeapFragOnlyAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
BIF_RET(uint_to_big(i, hp));
} else {
BIF_RET(make_small(i));
@@ -68,7 +75,7 @@ BIF_RETTYPE abs_1(BIF_ALIST_1)
int sz = big_arity(BIF_ARG_1) + 1;
Uint* x;
- hp = HAlloc(BIF_P, sz); /* See note at beginning of file */
+ hp = HeapFragOnlyAlloc(BIF_P, sz); /* See note at beginning of file */
sz--;
res = make_big(hp);
x = big_val(BIF_ARG_1);
@@ -83,7 +90,7 @@ BIF_RETTYPE abs_1(BIF_ALIST_1)
GET_DOUBLE(BIF_ARG_1, f);
if (f.fd < 0.0) {
- hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT);
+ hp = HeapFragOnlyAlloc(BIF_P, FLOAT_SIZE_OBJECT);
f.fd = fabs(f.fd);
res = make_float(hp);
PUT_DOUBLE(f, hp);
@@ -116,7 +123,7 @@ BIF_RETTYPE float_1(BIF_ALIST_1)
} else if (big_to_double(BIF_ARG_1, &f.fd) < 0) {
goto badarg;
}
- hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT);
+ hp = HeapFragOnlyAlloc(BIF_P, FLOAT_SIZE_OBJECT);
res = make_float(hp);
PUT_DOUBLE(f, hp);
BIF_RET(res);
@@ -194,26 +201,113 @@ BIF_RETTYPE round_1(BIF_ALIST_1)
BIF_RET(res);
}
+/*
+ * This version of length/1 is called from native code and apply/3.
+ */
+
BIF_RETTYPE length_1(BIF_ALIST_1)
{
+ Eterm args[3];
+
+ /*
+ * Arrange argument registers the way expected by
+ * erts_trapping_length_1(). We save the original argument in
+ * args[2] in case an error should be signaled.
+ */
+
+ args[0] = BIF_ARG_1;
+ args[1] = make_small(0);
+ args[2] = BIF_ARG_1;
+ return erlang_length_trap(BIF_P, args, A__I);
+}
+
+static BIF_RETTYPE erlang_length_trap(BIF_ALIST_3)
+{
+ Eterm res;
+
+ res = erts_trapping_length_1(BIF_P, BIF__ARGS);
+ if (is_value(res)) { /* Success. */
+ BIF_RET(res);
+ } else { /* Trap or error. */
+ if (BIF_P->freason == TRAP) {
+ /*
+ * The available reductions were exceeded. Trap.
+ */
+ BIF_TRAP3(&erlang_length_export, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ } else {
+ /*
+ * Signal an error. The original argument was tucked away in BIF_ARG_3.
+ */
+ ERTS_BIF_ERROR_TRAPPED1(BIF_P, BIF_P->freason,
+ bif_export[BIF_length_1], BIF_ARG_3);
+ }
+ }
+}
+
+/*
+ * Trappable helper function for calculating length/1.
+ *
+ * When calling this function, entries in args[] should be set up as
+ * follows:
+ *
+ * args[0] = List to calculate length for.
+ * args[1] = Length accumulator (tagged integer).
+ *
+ * If the return value is a tagged integer, the length was calculated
+ * successfully.
+ *
+ * Otherwise, if the return value is THE_NON_VALUE and p->freason is TRAP,
+ * the available reductions were exceeded and this function must be called
+ * again after rescheduling. args[0] and args[1] have been updated to
+ * contain the next part of the list and length so far, respectively.
+ *
+ * Otherwise, if the return value is THE_NON_VALUE, the list did not end
+ * in an empty list (and p->freason is BADARG).
+ */
+
+Eterm erts_trapping_length_1(Process* p, Eterm* args)
+{
Eterm list;
Uint i;
-
- if (is_nil(BIF_ARG_1))
- BIF_RET(SMALL_ZERO);
- if (is_not_list(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- list = BIF_ARG_1;
- i = 0;
- while (is_list(list)) {
- i++;
+ Uint max_iter;
+ Uint saved_max_iter;
+
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(p) * 16;
+#endif
+ saved_max_iter = max_iter;
+ ASSERT(max_iter > 0);
+
+ list = args[0];
+ i = unsigned_val(args[1]);
+ while (is_list(list) && max_iter != 0) {
list = CDR(list_val(list));
+ i++, max_iter--;
+ }
+
+ if (is_list(list)) {
+ /*
+ * We have exceeded the allotted number of iterations.
+ * Save the result so far and signal a trap.
+ */
+ args[0] = list;
+ args[1] = make_small(i);
+ p->freason = TRAP;
+ BUMP_ALL_REDS(p);
+ return THE_NON_VALUE;
+ } else if (is_not_nil(list)) {
+ /* Error. Should be NIL. */
+ BIF_ERROR(p, BADARG);
}
- if (is_not_nil(list)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- BIF_RET(make_small(i));
+
+ /*
+ * We reached the end of the list successfully. Bump reductions
+ * and return result.
+ */
+ BUMP_REDS(p, (saved_max_iter - max_iter) / 16);
+ return make_small(i);
}
/* returns the size of a tuple or a binary */
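
erts_trapping_length_1 above has the canonical yielding-BIF shape: walk a bounded number of cells (16 per remaining reduction), and when the budget runs out before the list does, stash the cursor and accumulator back into args[] and signal TRAP. A simplified sketch of the step function, using plain C types instead of ERTS terms:

    struct cons { struct cons *cdr; };

    typedef struct {
        struct cons *cursor;    /* plays the role of args[0] */
        unsigned long acc;      /* plays the role of args[1] */
    } LenState;

    /* Returns nonzero when the walk finished; zero when the budget ran
     * out and the caller should reschedule and call again. */
    static int length_step(LenState *st, unsigned long budget)
    {
        while (st->cursor != NULL && budget != 0) {
            st->cursor = st->cursor->cdr;
            st->acc++;
            budget--;
        }
        return st->cursor == NULL;
    }
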
@@ -229,7 +323,7 @@ BIF_RETTYPE size_1(BIF_ALIST_1)
if (IS_USMALL(0, sz)) {
return make_small(sz);
} else {
- Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ Eterm* hp = HeapFragOnlyAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
BIF_RET(uint_to_big(sz, hp));
}
}
@@ -252,12 +346,12 @@ BIF_RETTYPE bit_size_1(BIF_ALIST_1)
if (IS_USMALL(0,low_bits)) {
BIF_RET(make_small(low_bits));
} else {
- Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ Eterm* hp = HeapFragOnlyAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
BIF_RET(uint_to_big(low_bits, hp));
}
} else {
Uint sz = BIG_UINT_HEAP_SIZE+1;
- Eterm* hp = HAlloc(BIF_P, sz);
+ Eterm* hp = HeapFragOnlyAlloc(BIF_P, sz);
hp[0] = make_pos_bignum_header(sz-1);
BIG_DIGIT(hp,0) = low_bits;
BIG_DIGIT(hp,1) = high_bits;
@@ -281,7 +375,7 @@ BIF_RETTYPE byte_size_1(BIF_ALIST_1)
if (IS_USMALL(0, bytesize)) {
BIF_RET(make_small(bytesize));
} else {
- Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ Eterm* hp = HeapFragOnlyAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
BIF_RET(uint_to_big(bytesize, hp));
}
} else {
@@ -325,7 +419,7 @@ double_to_integer(Process* p, double x)
}
sz = BIG_NEED_SIZE(ds); /* number of words including arity */
- hp = HAlloc(p, sz);
+ hp = HeapFragOnlyAlloc(p, sz);
res = make_big(hp);
xp = (ErtsDigit*) (hp + 1);
@@ -371,389 +465,3 @@ BIF_RETTYPE binary_part_2(BIF_ALIST_2)
badarg:
BIF_ERROR(BIF_P,BADARG);
}
-
-
-/*
- * The following code is used when a guard that may build on the
- * heap is called directly. They must not use HAlloc(), but must
- * do a garbage collection if there is insufficient heap space.
- *
- * Important note: All error checking MUST be done before doing
- * a garbage collection. The compiler assumes that all registers
- * are still valid if a guard BIF generates an exception.
- */
-
-#define ERTS_NEED_GC(p, need) ((HEAP_LIMIT((p)) - HEAP_TOP((p))) <= (need))
-
-Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm list = reg[live];
- int i;
-
- if (is_nil(list))
- return SMALL_ZERO;
- i = 0;
- while (is_list(list)) {
- i++;
- list = CDR(list_val(list));
- }
- if (is_not_nil(list)) {
- BIF_ERROR(p, BADARG);
- }
- return make_small(i);
-}
-
-Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg = reg[live];
- if (is_tuple(arg)) {
- Eterm* tupleptr = tuple_val(arg);
- return make_small(arityval(*tupleptr));
- } else if (is_binary(arg)) {
- Uint sz = binary_size(arg);
- if (IS_USMALL(0, sz)) {
- return make_small(sz);
- } else {
- Eterm* hp;
- if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
- erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
- }
- hp = p->htop;
- p->htop += BIG_UINT_HEAP_SIZE;
- return uint_to_big(sz, hp);
- }
- }
- BIF_ERROR(p, BADARG);
-}
-
-Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg = reg[live];
- if (is_binary(arg)) {
- Uint low_bits;
- Uint bytesize;
- Uint high_bits;
- bytesize = binary_size(arg);
- high_bits = bytesize >> ((sizeof(Uint) * 8)-3);
- low_bits = (bytesize << 3) + binary_bitsize(arg);
- if (high_bits == 0) {
- if (IS_USMALL(0,low_bits)) {
- return make_small(low_bits);
- } else {
- Eterm* hp;
- if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
- erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
- }
- hp = p->htop;
- p->htop += BIG_UINT_HEAP_SIZE;
- return uint_to_big(low_bits, hp);
- }
- } else {
- Uint sz = BIG_UINT_HEAP_SIZE+1;
- Eterm* hp;
- if (ERTS_NEED_GC(p, sz)) {
- erts_garbage_collect(p, sz, reg, live);
- }
- hp = p->htop;
- p->htop += sz;
- hp[0] = make_pos_bignum_header(sz-1);
- BIG_DIGIT(hp,0) = low_bits;
- BIG_DIGIT(hp,1) = high_bits;
- return make_big(hp);
- }
- } else {
- BIF_ERROR(p, BADARG);
- }
-}
-
-Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg = reg[live];
- if (is_binary(arg)) {
- Uint bytesize = binary_size(arg);
- if (binary_bitsize(arg) > 0) {
- bytesize++;
- }
- if (IS_USMALL(0, bytesize)) {
- return make_small(bytesize);
- } else {
- Eterm* hp;
- if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
- erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
- }
- hp = p->htop;
- p->htop += BIG_UINT_HEAP_SIZE;
- return uint_to_big(bytesize, hp);
- }
- } else {
- BIF_ERROR(p, BADARG);
- }
-}
-
-Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg = reg[live];
- if (is_flatmap(arg)) {
- flatmap_t *mp = (flatmap_t*)flatmap_val(arg);
- return make_small(flatmap_get_size(mp));
- } else if (is_hashmap(arg)) {
- Eterm* hp;
- Uint size;
- size = hashmap_size(arg);
- if (IS_USMALL(0, size)) {
- return make_small(size);
- }
- if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
- erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
- }
- hp = p->htop;
- p->htop += BIG_UINT_HEAP_SIZE;
- return uint_to_big(size, hp);
- }
- p->fvalue = arg;
- BIF_ERROR(p, BADMAP);
-}
-
-Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg;
- Eterm res;
- Sint i0, i;
- Eterm* hp;
-
- arg = reg[live];
-
- /* integer arguments */
- if (is_small(arg)) {
- i0 = signed_val(arg);
- i = ERTS_SMALL_ABS(i0);
- if (i0 == MIN_SMALL) {
- if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
- erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live+1);
- arg = reg[live];
- }
- hp = p->htop;
- p->htop += BIG_UINT_HEAP_SIZE;
- return uint_to_big(i, hp);
- } else {
- return make_small(i);
- }
- } else if (is_big(arg)) {
- if (!big_sign(arg)) {
- return arg;
- } else {
- int sz = big_arity(arg) + 1;
- Uint* x;
-
- if (ERTS_NEED_GC(p, sz)) {
- erts_garbage_collect(p, sz, reg, live+1);
- arg = reg[live];
- }
- hp = p->htop;
- p->htop += sz;
- sz--;
- res = make_big(hp);
- x = big_val(arg);
- *hp++ = make_pos_bignum_header(sz);
- x++; /* skip thing */
- while(sz--)
- *hp++ = *x++;
- return res;
- }
- } else if (is_float(arg)) {
- FloatDef f;
-
- GET_DOUBLE(arg, f);
- if (f.fd < 0.0) {
- if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
- erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live+1);
- arg = reg[live];
- }
- hp = p->htop;
- p->htop += FLOAT_SIZE_OBJECT;
- f.fd = fabs(f.fd);
- res = make_float(hp);
- PUT_DOUBLE(f, hp);
- return res;
- }
- else
- return arg;
- }
- BIF_ERROR(p, BADARG);
-}
-
-Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg;
- Eterm res;
- Eterm* hp;
- FloatDef f;
-
- /* check args */
- arg = reg[live];
- if (is_not_integer(arg)) {
- if (is_float(arg)) {
- return arg;
- } else {
- badarg:
- BIF_ERROR(p, BADARG);
- }
- }
- if (is_small(arg)) {
- Sint i = signed_val(arg);
- f.fd = i; /* use "C"'s auto casting */
- } else if (big_to_double(arg, &f.fd) < 0) {
- goto badarg;
- }
- if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
- erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live+1);
- arg = reg[live];
- }
- hp = p->htop;
- p->htop += FLOAT_SIZE_OBJECT;
- res = make_float(hp);
- PUT_DOUBLE(f, hp);
- return res;
-}
-
-Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg;
- FloatDef f;
-
- arg = reg[live];
- if (is_not_float(arg)) {
- if (is_integer(arg)) {
- return arg;
- }
- BIF_ERROR(p, BADARG);
- }
- GET_DOUBLE(arg, f);
-
- return gc_double_to_integer(p, round(f.fd), reg, live);
-}
-
-Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg;
- FloatDef f;
-
- arg = reg[live];
- if (is_not_float(arg)) {
- if (is_integer(arg)) {
- return arg;
- }
- BIF_ERROR(p, BADARG);
- }
- /* get the float */
- GET_DOUBLE(arg, f);
-
- /* truncate it and return the resultant integer */
- return gc_double_to_integer(p, (f.fd >= 0.0) ? floor(f.fd) : ceil(f.fd),
- reg, live);
-}
-
-Eterm erts_gc_floor_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg;
- FloatDef f;
-
- arg = reg[live];
- if (is_not_float(arg)) {
- if (is_integer(arg)) {
- return arg;
- }
- BIF_ERROR(p, BADARG);
- }
- GET_DOUBLE(arg, f);
- return gc_double_to_integer(p, floor(f.fd), reg, live);
-}
-
-Eterm erts_gc_ceil_1(Process* p, Eterm* reg, Uint live)
-{
- Eterm arg;
- FloatDef f;
-
- arg = reg[live];
- if (is_not_float(arg)) {
- if (is_integer(arg)) {
- return arg;
- }
- BIF_ERROR(p, BADARG);
- }
- GET_DOUBLE(arg, f);
- return gc_double_to_integer(p, ceil(f.fd), reg, live);
-}
-
-static Eterm
-gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live)
-{
- int is_negative;
- int ds;
- ErtsDigit* xp;
- int i;
- Eterm res;
- size_t sz;
- Eterm* hp;
- double dbase;
-
- if ((x < (double) (MAX_SMALL+1)) && (x > (double) (MIN_SMALL-1))) {
- Sint xi = x;
- return make_small(xi);
- }
-
- if (x >= 0) {
- is_negative = 0;
- } else {
- is_negative = 1;
- x = -x;
- }
-
- /* Unscale & (calculate exponent) */
- ds = 0;
- dbase = ((double)(D_MASK)+1);
- while(x >= 1.0) {
- x /= dbase; /* "shift" right */
- ds++;
- }
- sz = BIG_NEED_SIZE(ds); /* number of words including arity */
- if (ERTS_NEED_GC(p, sz)) {
- erts_garbage_collect(p, sz, reg, live);
- }
- hp = p->htop;
- p->htop += sz;
- res = make_big(hp);
- xp = (ErtsDigit*) (hp + 1);
-
- for (i = ds-1; i >= 0; i--) {
- ErtsDigit d;
-
- x *= dbase; /* "shift" left */
- d = x; /* trunc */
- xp[i] = d; /* store digit */
- x -= d; /* remove integer part */
- }
- while ((ds & (BIG_DIGITS_PER_WORD-1)) != 0) {
- xp[ds++] = 0;
- }
-
- if (is_negative) {
- *hp = make_neg_bignum_header(sz-1);
- } else {
- *hp = make_pos_bignum_header(sz-1);
- }
- return res;
-}
-
-/********************************************************************************
- * binary_part guards. The actual implementation is in erl_bif_binary.c
- ********************************************************************************/
-Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live)
-{
- return erts_gc_binary_part(p,reg,live,0);
-}
-
-Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live)
-{
- return erts_gc_binary_part(p,reg,live,1);
-}
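
The removed gc_double_to_integer (replaced by the heap-fragment version earlier in this file) extracts bignum digits by first dividing the float below 1.0 to count digits, then multiplying back up and truncating one digit per pass. A standalone sketch of that extraction, assuming a 32-bit digit base for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        double x = 12345678901234567890.0;       /* value to convert */
        double dbase = 4294967296.0;             /* assumed digit base, 2^32 */
        uint32_t digits[8];
        int ds = 0, i;

        while (x >= 1.0) {                       /* unscale: count digits */
            x /= dbase;
            ds++;
        }
        for (i = ds - 1; i >= 0; i--) {          /* rescale: peel digits */
            x *= dbase;
            digits[i] = (uint32_t)x;             /* truncate to one digit */
            x -= digits[i];
        }
        printf("%d digits, most significant %u\n", ds, digits[ds - 1]);
        return 0;
    }
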
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 96f399fbbe..0339589b79 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -227,7 +227,7 @@ bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
}).
*/
-static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
+static int do_calc_mon_size(ErtsMonitor *mon, void *vpsz, Sint reds)
{
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
Uint *psz = vpsz;
@@ -238,7 +238,8 @@ static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
else
*psz += is_immed(mon->other.item) ? 0 : NC_HEAP_SIZE(mon->other.item);
- *psz += 9; /* CONS + 6-tuple */
+ *psz += 9; /* CONS + 6-tuple */
+ return 1;
}
typedef struct {
@@ -248,7 +249,7 @@ typedef struct {
Eterm tag;
} MonListContext;
-static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
+static int do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc, Sint reds)
{
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
MonListContext *pmlc = vpmlc;
@@ -319,6 +320,7 @@ static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
pmlc->hp += 7;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
pmlc->hp += 2;
+ return 1;
}
static Eterm
@@ -328,7 +330,7 @@ make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
Uint sz = 0;
MonListContext mlc;
void (*foreach)(ErtsMonitor *,
- void (*)(ErtsMonitor *, void *),
+ ErtsMonitorFunc,
void *);
foreach = tree ? erts_monitor_tree_foreach : erts_monitor_list_foreach;
@@ -354,7 +356,7 @@ make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
}).
*/
-static void calc_lnk_size(ErtsLink *lnk, void *vpsz)
+static int calc_lnk_size(ErtsLink *lnk, void *vpsz, Sint reds)
{
Uint *psz = vpsz;
Uint sz = 0;
@@ -364,7 +366,8 @@ static void calc_lnk_size(ErtsLink *lnk, void *vpsz)
*psz += sz;
*psz += is_immed(lnk->other.item) ? 0 : size_object(lnk->other.item);
- *psz += 7; /* CONS + 4-tuple */
+ *psz += 7; /* CONS + 4-tuple */
+ return 1;
}
typedef struct {
@@ -374,7 +377,7 @@ typedef struct {
Eterm tag;
} LnkListContext;
-static void make_one_lnk_element(ErtsLink *lnk, void * vpllc)
+static int make_one_lnk_element(ErtsLink *lnk, void * vpllc, Sint reds)
{
LnkListContext *pllc = vpllc;
Eterm tup, t, pid, id;
@@ -411,6 +414,7 @@ static void make_one_lnk_element(ErtsLink *lnk, void * vpllc)
pllc->hp += 5;
pllc->res = CONS(pllc->hp, tup, pllc->res);
pllc->hp += 2;
+ return 1;
}
static Eterm
@@ -420,7 +424,7 @@ make_link_list(Process *p, int tree, ErtsLink *root, Eterm tail)
Uint sz = 0;
LnkListContext llc;
void (*foreach)(ErtsLink *,
- void (*)(ErtsLink *, void *),
+ ErtsLinkFunc,
void *);
foreach = tree ? erts_link_tree_foreach : erts_link_list_foreach;
@@ -519,16 +523,17 @@ do { \
} \
} while (0)
-static void collect_one_link(ErtsLink *lnk, void *vmicp)
+static int collect_one_link(ErtsLink *lnk, void *vmicp, Sint reds)
{
MonitorInfoCollection *micp = vmicp;
EXTEND_MONITOR_INFOS(micp);
micp->mi[micp->mi_i].entity.term = lnk->other.item;
micp->sz += 2 + NC_HEAP_SIZE(lnk->other.item);
micp->mi_i++;
+ return 1;
}
-static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
+static int collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
if (erts_monitor_is_origin(mon)) {
MonitorInfoCollection *micp = vmicp;
@@ -573,9 +578,10 @@ static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
break;
}
}
+ return 1;
}
-static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
+static int collect_one_target_monitor(ErtsMonitor *mon, void *vmicp, Sint reds)
{
MonitorInfoCollection *micp = vmicp;
@@ -612,8 +618,8 @@ static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
default:
break;
}
-
}
+ return 1;
}
typedef struct {
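
The signature change above (void to int, plus a reds argument) lets every visitor report the reductions it consumed, so the foreach drivers can suspend a long monitor or link traversal and resume it later. A simplified sketch of a budgeted walk under that convention (types reduced for illustration):

    typedef struct node { struct node *left, *right; } Node;
    typedef int (*VisitFunc)(Node *, void *, long reds);

    /* Walk until the budget is exhausted; a result <= 0 tells the
     * caller to yield and call again with fresh reductions. */
    static long foreach_budgeted(Node *n, VisitFunc f, void *arg, long reds)
    {
        if (n == NULL || reds <= 0)
            return reds;
        reds = foreach_budgeted(n->left, f, arg, reds);
        if (reds > 0)
            reds -= f(n, arg, reds);     /* visitor reports reductions spent */
        if (reds > 0)
            reds = foreach_budgeted(n->right, f, arg, reds);
        return reds;
    }
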
@@ -653,8 +659,8 @@ do { \
} \
} while (0)
-static void
-collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
+static int
+collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp, Sint reds)
{
if (mon->type == ERTS_MON_TYPE_SUSPEND) {
Sint count;
@@ -678,6 +684,7 @@ collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
smicp->smi_i++;
}
+ return 1;
}
/*
@@ -761,7 +768,7 @@ static ErtsProcessInfoArgs pi_args[] = {
{am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
{am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
{am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
- {am_reductions, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_reductions, 0, 0, ERTS_PROC_LOCK_MAIN},
{am_priority, 0, 0, 0},
{am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
{am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
@@ -2572,6 +2579,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
/* Need to be the only thread running... */
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_P->scheduler_data->current_process = NULL;
erts_thr_progress_block();
if (BIF_ARG_1 == am_info)
@@ -2585,6 +2593,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
erts_thr_progress_unblock();
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_P->scheduler_data->current_process = BIF_P;
ASSERT(dsbufp && dsbufp->str);
res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
@@ -2705,9 +2714,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
goto bld_instruction_counts;
}
-#ifdef DEBUG
ASSERT(endp == hp);
-#endif
BIF_RET(res);
#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
@@ -3018,6 +3025,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_nif_taints(BIF_P));
} else if (ERTS_IS_ATOM_STR("reader_groups_map", BIF_ARG_1)) {
BIF_RET(erts_get_reader_groups_map(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("decentralized_counter_groups_map", BIF_ARG_1)) {
+ BIF_RET(erts_get_decentralized_counter_groups_map(BIF_P));
} else if (ERTS_IS_ATOM_STR("dist_buf_busy_limit", BIF_ARG_1)) {
Uint hsz = 0;
@@ -3146,14 +3155,16 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-static void monitor_size(ErtsMonitor *mon, void *vsz)
+static int monitor_size(ErtsMonitor *mon, void *vsz, Sint reds)
{
*((Uint *) vsz) = erts_monitor_size(mon);
+ return 1;
}
-static void link_size(ErtsMonitor *lnk, void *vsz)
+static int link_size(ErtsMonitor *lnk, void *vsz, Sint reds)
{
*((Uint *) vsz) = erts_link_size(lnk);
+ return 1;
}
/**********************************************************************/
@@ -4225,7 +4236,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
Uint dflags = (TERM_TO_BINARY_DFLAGS
& ~DFLAG_EXPORT_PTR_TAG
& ~DFLAG_BIT_BINARIES);
- BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
+ Eterm res = erts_term_to_binary(BIF_P, tp[2], 0, dflags);
+ if (is_value(res))
+ BIF_RET(res);
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
Eterm res = am_undefined;
@@ -4432,6 +4446,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_P->fcalls = reds;
else
BIF_P->fcalls = reds - CONTEXT_REDS;
+ BIF_P->scheduler_data->virtual_reds = 0;
}
BIF_RET(am_true);
}
@@ -4596,18 +4611,17 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
}
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
- if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
- int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
- if (erts_debug_wait_completed(BIF_P, flag)) {
- ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
- }
- }
- if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2)) {
- int flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
- if (erts_debug_wait_completed(BIF_P, flag)) {
- ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
- }
- }
+ int flag = 0;
+ if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2))
+ flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
+ else if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2))
+ flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
+ else if (ERTS_IS_ATOM_STR("aux_work", BIF_ARG_2))
+ flag = ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK;
+
+ if (flag && erts_debug_wait_completed(BIF_P, flag)) {
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+ }
}
else if (ERTS_IS_ATOM_STR("broken_halt", BIF_ARG_1)) {
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -4674,6 +4688,22 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_notsup);
#endif
}
+ else if (ERTS_IS_ATOM_STR("ets_force_split", BIF_ARG_1)) {
+ if (is_tuple(BIF_ARG_2)) {
+ Eterm* tpl = tuple_val(BIF_ARG_2);
+
+ if (erts_ets_force_split(tpl[1], tpl[2] == am_true))
+ BIF_RET(am_ok);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("ets_debug_random_split_join", BIF_ARG_1)) {
+ if (is_tuple(BIF_ARG_2)) {
+ Eterm* tpl = tuple_val(BIF_ARG_2);
+
+ if (erts_ets_debug_random_split_join(tpl[1], tpl[2] == am_true))
+ BIF_RET(am_ok);
+ }
+ }
else if (ERTS_IS_ATOM_STR("mbuf", BIF_ARG_1)) {
Uint sz = size_object(BIF_ARG_2);
ErlHeapFragment* frag = new_message_buffer(sz);
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index aaf262780f..b23fa77f5f 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -35,101 +35,270 @@
static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
+/* erlang:'++'/2
+ *
+ * Adds a list to another (LHS ++ RHS). For historical reasons this is
+ * implemented by copying LHS and setting its tail to RHS without checking
+ * that RHS is a proper list. [] ++ 'not_a_list' will therefore result in
+ * 'not_a_list', and [1,2] ++ 3 will result in [1,2|3], and this is a bug that
+ * we have to live with. */
-static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
-{
- Eterm list;
- Eterm copy;
- Eterm last;
- Eterm* hp = NULL;
- Sint i;
+typedef struct {
+ Eterm lhs_original;
+ Eterm rhs_original;
- list = A;
+ Eterm iterator;
- if (is_nil(list)) {
- BIF_RET(B);
- }
+ Eterm result;
+ Eterm *result_cdr;
+} ErtsAppendContext;
+
+static int append_ctx_bin_dtor(Binary *context_bin) {
+ return 1;
+}
+
+static Eterm append_create_trap_state(Process *p,
+ ErtsAppendContext *from_context) {
+ ErtsAppendContext *to_context;
+ Binary *state_bin;
+ Eterm *hp;
+
+ state_bin = erts_create_magic_binary(sizeof(ErtsAppendContext),
+ append_ctx_bin_dtor);
+
+ to_context = ERTS_MAGIC_BIN_DATA(state_bin);
+ *to_context = *from_context;
- if (is_not_list(list)) {
- BIF_ERROR(p, BADARG);
+ if (from_context->result_cdr == &from_context->result) {
+ to_context->result_cdr = &to_context->result;
}
- /* optimistic append on heap first */
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(p), state_bin);
+}
+
+static BIF_RETTYPE lists_append_alloc(Process *p, ErtsAppendContext *context) {
+ static const Uint CELLS_PER_RED = 40;
+
+ Eterm *alloc_top, *alloc_end;
+ Uint cells_left, max_cells;
+ Eterm lookahead;
+
+ cells_left = max_cells = CELLS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ lookahead = context->iterator;
- if ((i = HeapWordsLeft(p) / 2) < 4) {
- goto list_tail;
+#ifdef DEBUG
+ cells_left = max_cells = max_cells / 10 + 1;
+#endif
+
+ while (cells_left != 0 && is_list(lookahead)) {
+ lookahead = CDR(list_val(lookahead));
+ cells_left--;
}
- hp = HEAP_TOP(p);
- copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
- list = CDR(list_val(list));
- hp += 2;
- i -= 2; /* don't use the last 2 words (extra i--;) */
-
- while(i-- && is_list(list)) {
- Eterm* listp = list_val(list);
- last = CONS(hp, CAR(listp), make_list(hp+2));
- list = CDR(listp);
- hp += 2;
+ BUMP_REDS(p, (max_cells - cells_left) / CELLS_PER_RED);
+
+ if (is_not_list(lookahead) && is_not_nil(lookahead)) {
+ /* It's possible that we're erroring out with an incomplete list, so it
+ * must be terminated or we'll leave a hole in the heap. */
+ *context->result_cdr = NIL;
+ return -1;
}
- /* A is proper and B is NIL return A as-is, don't update HTOP */
+ alloc_top = HAlloc(p, 2 * (max_cells - cells_left));
+ alloc_end = alloc_top + 2 * (max_cells - cells_left);
+
+ while (alloc_top < alloc_end) {
+ Eterm *cell = list_val(context->iterator);
+
+ ASSERT(context->iterator != lookahead);
+
+ *context->result_cdr = make_list(alloc_top);
+ context->result_cdr = &CDR(alloc_top);
+ CAR(alloc_top) = CAR(cell);
- if (is_nil(list) && is_nil(B)) {
- BIF_RET(A);
+ context->iterator = CDR(cell);
+ alloc_top += 2;
}
- if (is_nil(list)) {
- HEAP_TOP(p) = hp;
- CDR(list_val(last)) = B;
- BIF_RET(copy);
+ if (is_list(context->iterator)) {
+ /* The result only has to be terminated when returning it to the user,
+ * but we're doing it when trapping as well to prevent headaches when
+ * debugging. */
+ *context->result_cdr = NIL;
+ ASSERT(cells_left == 0);
+ return 0;
}
-list_tail:
+ *context->result_cdr = context->rhs_original;
+ ASSERT(is_nil(context->iterator));
+
+ if (is_nil(context->rhs_original)) {
+ /* The list we created was equal to the original, so we'll return that
+ * in the hopes that the garbage we created can be removed soon. */
+ context->result = context->lhs_original;
+ }
+
+ return 1;
+}
+
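+/*
+ * lists_append_alloc above runs a reduction-bounded lookahead to learn
+ * how many cells it may copy, allocates exactly two words per cell, and
+ * chains them through result_cdr, a pointer to the slot that will
+ * receive the next CDR. A minimal sketch of that tail-pointer
+ * construction with plain malloc'd cons cells:
+ *
+ *   #include <stdlib.h>
+ *
+ *   typedef struct cell { void *car; struct cell *cdr; } Cell;
+ *
+ *   static Cell *copy_and_append(const Cell *lhs, Cell *rhs)
+ *   {
+ *       Cell *head = rhs;
+ *       Cell **cdr = &head;         / * plays the role of result_cdr * /
+ *
+ *       for (; lhs != NULL; lhs = lhs->cdr) {
+ *           Cell *c = malloc(sizeof *c);
+ *           if (c == NULL)
+ *               abort();
+ *           c->car = lhs->car;
+ *           *cdr = c;               / * link the copy into the result * /
+ *           cdr = &c->cdr;          / * advance to the new tail slot * /
+ *       }
+ *       *cdr = rhs;                 / * terminate with RHS, as '++' does * /
+ *       return head;
+ *   }
+ */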
+static BIF_RETTYPE lists_append_onheap(Process *p, ErtsAppendContext *context) {
+ static const Uint CELLS_PER_RED = 60;
+
+ Eterm *alloc_start, *alloc_top, *alloc_end;
+ Uint cells_left, max_cells;
+
+ cells_left = max_cells = CELLS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+
+#ifdef DEBUG
+ cells_left = max_cells = max_cells / 10 + 1;
+#endif
+
+ ASSERT(HEAP_LIMIT(p) >= HEAP_TOP(p) + 2);
+ alloc_start = HEAP_TOP(p);
+ alloc_end = HEAP_LIMIT(p) - 2;
+ alloc_top = alloc_start;
+
+ /* Don't process more cells than we have reductions for. */
+ alloc_end = MIN(alloc_top + (cells_left * 2), alloc_end);
+
+ while (alloc_top < alloc_end && is_list(context->iterator)) {
+ Eterm *cell = list_val(context->iterator);
- if ((i = erts_list_length(list)) < 0) {
- BIF_ERROR(p, BADARG);
+ *context->result_cdr = make_list(alloc_top);
+ context->result_cdr = &CDR(alloc_top);
+ CAR(alloc_top) = CAR(cell);
+
+ context->iterator = CDR(cell);
+ alloc_top += 2;
}
- /* remaining list was proper and B is NIL */
- if (is_nil(B)) {
- BIF_RET(A);
+ cells_left -= (alloc_top - alloc_start) / 2;
+ HEAP_TOP(p) = alloc_top;
+
+ ASSERT(cells_left >= 0 && cells_left <= max_cells);
+ BUMP_REDS(p, (max_cells - cells_left) / CELLS_PER_RED);
+
+ if (is_not_list(context->iterator) && is_not_nil(context->iterator)) {
+ *context->result_cdr = NIL;
+ return -1;
}
- if (hp) {
- /* Note: fall through case, already written
- * on the heap.
- * The last 2 words of the heap is not written yet
- */
- Eterm *hp_save = hp;
- ASSERT(i != 0);
- HEAP_TOP(p) = hp + 2;
- if (i == 1) {
- hp[0] = CAR(list_val(list));
- hp[1] = B;
- BIF_RET(copy);
+ if (is_list(context->iterator)) {
+ if (cells_left > CELLS_PER_RED) {
+ return lists_append_alloc(p, context);
}
- hp = HAlloc(p, 2*(i - 1));
- last = CONS(hp_save, CAR(list_val(list)), make_list(hp));
- } else {
- hp = HAlloc(p, 2*i);
- copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
- hp += 2;
+
+ *context->result_cdr = NIL;
+ return 0;
+ }
+
+ *context->result_cdr = context->rhs_original;
+ ASSERT(is_nil(context->iterator));
+
+ if (is_nil(context->rhs_original)) {
+ context->result = context->lhs_original;
}
- list = CDR(list_val(list));
- i--;
+ return 1;
+}
+
+static int append_continue(Process *p, ErtsAppendContext *context) {
+ /* We build the result on the unused part of the heap if possible to save
+ * us the trouble of having to figure out the list size. We fall back to
+ * lists_append_alloc when we run out of space. */
+ if (HeapWordsLeft(p) > 8) {
+ return lists_append_onheap(p, context);
+ }
+
+ return lists_append_alloc(p, context);
+}
+
+static int append_start(Process *p, Eterm lhs, Eterm rhs,
+ ErtsAppendContext *context) {
+ context->lhs_original = lhs;
+ context->rhs_original = rhs;
+
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ context->iterator = lhs;
+
+ return append_continue(p, context);
+}
+
+/* erlang:'++'/2 */
+static Eterm append(Export *bif_entry, BIF_ALIST_2) {
+ Eterm lhs = BIF_ARG_1, rhs = BIF_ARG_2;
+
+ if (is_nil(lhs)) {
+ /* This is buggy but expected, `[] ++ 'not_a_list'` has always resulted
+ * in 'not_a_list'. */
+ return rhs;
+ } else if (is_list(lhs)) {
+ /* We start with the context on the stack in the hopes that we won't
+ * have to trap. */
+ ErtsAppendContext context;
+ int res;
+
+ res = append_start(BIF_P, lhs, rhs, &context);
+
+ if (res == 0) {
+ Eterm state_mref;
+
+ state_mref = append_create_trap_state(BIF_P, &context);
+ erts_set_gc_state(BIF_P, 0);
+
+ BIF_TRAP2(bif_entry, BIF_P, state_mref, NIL);
+ }
+
+ if (res < 0) {
+ ASSERT(is_nil(*context.result_cdr));
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(*context.result_cdr == context.rhs_original);
+ BIF_RET(context.result);
+ } else if (is_internal_magic_ref(lhs)) {
+ ErtsAppendContext *context;
+ int (*dtor)(Binary*);
+ Binary *magic_bin;
+
+ int res;
+
+ magic_bin = erts_magic_ref2bin(lhs);
+ dtor = ERTS_MAGIC_BIN_DESTRUCTOR(magic_bin);
+
+ if (dtor != append_ctx_bin_dtor) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ ASSERT(rhs == NIL);
- ASSERT(i > -1);
- while(i--) {
- Eterm* listp = list_val(list);
- last = CONS(hp, CAR(listp), make_list(hp+2));
- list = CDR(listp);
- hp += 2;
+ context = ERTS_MAGIC_BIN_DATA(magic_bin);
+ res = append_continue(BIF_P, context);
+
+ if (res == 0) {
+ BIF_TRAP2(bif_entry, BIF_P, lhs, NIL);
+ }
+
+ erts_set_gc_state(BIF_P, 1);
+
+ if (res < 0) {
+ ASSERT(is_nil(*context->result_cdr));
+ ERTS_BIF_ERROR_TRAPPED2(BIF_P, BADARG, bif_entry,
+ context->lhs_original,
+ context->rhs_original);
+ }
+
+ ASSERT(*context->result_cdr == context->rhs_original);
+ BIF_RET(context->result);
}
- CDR(list_val(last)) = B;
- BIF_RET(copy);
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+
+ BIF_ERROR(BIF_P, BADARG);
}
/*
@@ -139,12 +308,12 @@ list_tail:
Eterm
ebif_plusplus_2(BIF_ALIST_2)
{
- return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ return append(bif_export[BIF_ebif_plusplus_2], BIF_CALL_ARGS);
}
BIF_RETTYPE append_2(BIF_ALIST_2)
{
- return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ return append(bif_export[BIF_append_2], BIF_CALL_ARGS);
}
/* erlang:'--'/2
@@ -915,7 +1084,7 @@ static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
list = list_in;
tail = tail_in;
- cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
+ cells_left = max_cells = CELLS_PER_RED * ERTS_BIF_REDS_LEFT(c_p);
lookahead = list;
while (cells_left != 0 && is_list(lookahead)) {
@@ -964,7 +1133,7 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
list = list_in;
tail = tail_in;
- cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
+ cells_left = max_cells = CELLS_PER_RED * ERTS_BIF_REDS_LEFT(c_p);
ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
alloc_start = HEAP_TOP(c_p);
@@ -992,8 +1161,6 @@ static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
if (is_nil(list)) {
BIF_RET(tail);
} else if (is_list(list)) {
- ASSERT(is_list(tail));
-
if (cells_left > CELLS_PER_RED) {
return lists_reverse_alloc(c_p, list, tail);
}
diff --git a/erts/emulator/beam/erl_bif_persistent.c b/erts/emulator/beam/erl_bif_persistent.c
index 5a78a043ce..f38e0cc5cb 100644
--- a/erts/emulator/beam/erl_bif_persistent.c
+++ b/erts/emulator/beam/erl_bif_persistent.c
@@ -37,15 +37,6 @@
#include "erl_binary.h"
/*
- * The limit for the number of persistent terms before
- * a warning is issued.
- */
-
-#define WARNING_LIMIT 20000
-#define XSTR(s) STR(s)
-#define STR(s) #s
-
-/*
* Parameters for the hash table.
*/
#define INITIAL_SIZE 8
@@ -73,14 +64,69 @@ typedef struct trap_data {
Uint memory; /* Used by info/0 to count used memory */
} TrapData;
+typedef enum {
+ ERTS_PERSISTENT_TERM_CPY_PLACE_START,
+ ERTS_PERSISTENT_TERM_CPY_PLACE_1,
+ ERTS_PERSISTENT_TERM_CPY_PLACE_2,
+ ERTS_PERSISTENT_TERM_CPY_PLACE_3
+} ErtsPersistentTermCpyTableLocation;
+
+typedef enum {
+ ERTS_PERSISTENT_TERM_CPY_NO_REHASH = 0,
+ ERTS_PERSISTENT_TERM_CPY_REHASH = 1,
+ ERTS_PERSISTENT_TERM_CPY_TEMP = 2
+} ErtsPersistentTermCpyTableType;
+
+typedef struct {
+ HashTable* old_table; /* in param */
+ Uint new_size; /* in param */
+ ErtsPersistentTermCpyTableType copy_type; /* in param */
+ Uint max_iterations; /* in param */
+ ErtsPersistentTermCpyTableLocation location; /* in/out param */
+ Uint iterations_done; /* in/out param */
+ Uint total_iterations_done; /* in/out param */
+ HashTable* new_table; /* out param */
+} ErtsPersistentTermCpyTableCtx;
+
+typedef enum {
+ PUT2_TRAP_LOCATION_NEW_KEY,
+ PUT2_TRAP_LOCATION_REPLACE_VALUE
+} ErtsPersistentTermPut2TrapLocation;
+
+typedef struct {
+ ErtsPersistentTermPut2TrapLocation trap_location;
+ Eterm key;
+ Eterm term;
+ Uint entry_index;
+ HashTable* hash_table;
+ Eterm heap[3];
+ Eterm tuple;
+ ErtsPersistentTermCpyTableCtx cpy_ctx;
+} ErtsPersistentTermPut2Context;
+
+typedef enum {
+ ERASE1_TRAP_LOCATION_TMP_COPY,
+ ERASE1_TRAP_LOCATION_FINAL_COPY
+} ErtsPersistentTermErase1TrapLocation;
+
+typedef struct {
+ ErtsPersistentTermErase1TrapLocation trap_location;
+ Eterm key;
+ HashTable* old_table;
+ HashTable* new_table;
+ Uint entry_index;
+ Eterm old_term;
+ HashTable* tmp_table;
+ ErtsPersistentTermCpyTableCtx cpy_ctx;
+} ErtsPersistentTermErase1Context;
+
/*
* Declarations of local functions.
*/
static HashTable* create_initial_table(void);
static Uint lookup(HashTable* hash_table, Eterm key);
-static HashTable* copy_table(HashTable* old_table, Uint new_size, int rehash);
-static HashTable* tmp_table_copy(HashTable* old_table);
+static HashTable* copy_table(ErtsPersistentTermCpyTableCtx* ctx);
static int try_seize_update_permission(Process* c_p);
static void release_update_permission(int release_updater);
static void table_updater(void* table);
@@ -127,7 +173,6 @@ static Process* updater_process = NULL;
/* Protected by update_table_permission_mtx */
static ErtsThrPrgrLaterOp thr_prog_op;
-static int issued_warning = 0;
/*
* Queue of hash tables to be deleted.
@@ -139,7 +184,7 @@ static HashTable** delete_queue_tail = &delete_queue_head;
/*
* The following variables are only used during crash dumping. They
- * are intialized by erts_init_persistent_dumping().
+ * are initialized by erts_init_persistent_dumping().
*/
ErtsLiteralArea** erts_persistent_areas;
@@ -188,101 +233,181 @@ void erts_init_bif_persistent_term(void)
&persistent_term_info_trap);
}
+/*
+ * Macro used for trapping in persistent_term_put_2 and
+ * persistent_term_erase_1
+ */
+#define TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME, TRAP_CODE) \
+ do { \
+ ctx->cpy_ctx = (ErtsPersistentTermCpyTableCtx){ \
+ .old_table = OLD_TABLE, \
+ .new_size = NEW_SIZE, \
+ .copy_type = COPY_TYPE, \
+ .location = ERTS_PERSISTENT_TERM_CPY_PLACE_START \
+ }; \
+ L_ ## LOC_NAME: \
+ ctx->cpy_ctx.max_iterations = MAX(1, max_iterations); \
+ TABLE_DEST = copy_table(&ctx->cpy_ctx); \
+ iterations_until_trap -= ctx->cpy_ctx.total_iterations_done; \
+ if (TABLE_DEST == NULL) { \
+ ctx->trap_location = LOC_NAME; \
+ erts_set_gc_state(BIF_P, 0); \
+ BUMP_ALL_REDS(BIF_P); \
+ TRAP_CODE; \
+ } \
+ } while (0)
+
+static int persistent_term_put_2_ctx_bin_dtor(Binary *context_bin)
+{
+ ErtsPersistentTermPut2Context* ctx = ERTS_MAGIC_BIN_DATA(context_bin);
+ if (ctx->cpy_ctx.new_table != NULL) {
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM, ctx->cpy_ctx.new_table);
+ release_update_permission(0);
+ }
+ return 1;
+}
+/*
+ * A linear congruential generator that is used in the debug emulator
+ * to trap after a random number of iterations in
+ * persistent_term_put_2 and persistent_term_erase_1.
+ *
+ * https://en.wikipedia.org/wiki/Linear_congruential_generator
+ */
+#define GET_SMALL_RANDOM_INT(SEED) \
+ (1103515245 * (SEED) + 12345) % 227
+
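
For reference, with modulus 227 the recurrence above always lands in [0, 226], so the debug emulator traps after at most a couple of hundred iterations. A minimal standalone sketch of the same recurrence (plain C; the constants match the macro, everything else is illustrative):

    #include <stdio.h>

    /* x_{n+1} = (1103515245 * x_n + 12345) mod 227, the same constants
     * as GET_SMALL_RANDOM_INT above. Unsigned wrap-around just folds
     * into the pseudorandomness; all that matters here is a small,
     * varying result. */
    static unsigned lcg_next(unsigned seed)
    {
        return (1103515245u * seed + 12345u) % 227u;
    }

    int main(void)
    {
        unsigned x = 42; /* arbitrary seed */
        for (int i = 0; i < 5; i++) {
            x = lcg_next(x);
            printf("%u\n", x); /* always in [0, 226] */
        }
        return 0;
    }
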
BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
{
- Eterm key;
- Eterm term;
- Eterm heap[3];
- Eterm tuple;
- HashTable* hash_table;
- Uint term_size;
- Uint lit_area_size;
- ErlOffHeap code_off_heap;
- ErtsLiteralArea* literal_area;
- erts_shcopy_t info;
- Eterm* ptr;
- Uint entry_index;
+ static const Uint ITERATIONS_PER_RED = 32;
+ ErtsPersistentTermPut2Context* ctx;
+ Eterm state_mref = THE_NON_VALUE;
+ long iterations_until_trap;
+ long max_iterations;
+#define PUT_TRAP_CODE \
+ BIF_TRAP2(bif_export[BIF_persistent_term_put_2], BIF_P, state_mref, BIF_ARG_2)
+#define TRAPPING_COPY_TABLE_PUT(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME) \
+ TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, COPY_TYPE, LOC_NAME, PUT_TRAP_CODE)
+
+#ifdef DEBUG
+ (void)ITERATIONS_PER_RED;
+ iterations_until_trap = max_iterations =
+ GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
+#else
+ iterations_until_trap = max_iterations =
+ ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
+#endif
+ if (is_internal_magic_ref(BIF_ARG_1) &&
+ (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
+ persistent_term_put_2_ctx_bin_dtor)) {
+ /* Restore state after a trap */
+ Binary* state_bin;
+ state_mref = BIF_ARG_1;
+ state_bin = erts_magic_ref2bin(state_mref);
+ ctx = ERTS_MAGIC_BIN_DATA(state_bin);
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ erts_set_gc_state(BIF_P, 1);
+ switch (ctx->trap_location) {
+ case PUT2_TRAP_LOCATION_NEW_KEY:
+ goto L_PUT2_TRAP_LOCATION_NEW_KEY;
+ case PUT2_TRAP_LOCATION_REPLACE_VALUE:
+ goto L_PUT2_TRAP_LOCATION_REPLACE_VALUE;
+ }
+ } else {
+ /* Save state in magic bin in case trapping is necessary */
+ Eterm* hp;
+ Binary* state_bin = erts_create_magic_binary(sizeof(ErtsPersistentTermPut2Context),
+ persistent_term_put_2_ctx_bin_dtor);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
+ ctx = ERTS_MAGIC_BIN_DATA(state_bin);
+ /*
+ * IMPORTANT: The following field is used to detect if
+ * persistent_term_put_2_ctx_bin_dtor needs to free memory
+ */
+ ctx->cpy_ctx.new_table = NULL;
+ }
+
if (!try_seize_update_permission(BIF_P)) {
ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
+ ctx->hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
- hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ ctx->key = BIF_ARG_1;
+ ctx->term = BIF_ARG_2;
- key = BIF_ARG_1;
- term = BIF_ARG_2;
+ ctx->entry_index = lookup(ctx->hash_table, ctx->key);
- entry_index = lookup(hash_table, key);
-
- heap[0] = make_arityval(2);
- heap[1] = key;
- heap[2] = term;
- tuple = make_tuple(heap);
+ ctx->heap[0] = make_arityval(2);
+ ctx->heap[1] = ctx->key;
+ ctx->heap[2] = ctx->term;
+ ctx->tuple = make_tuple(ctx->heap);
- if (is_nil(hash_table->term[entry_index])) {
- Uint size = hash_table->allocated;
- if (MUST_GROW(hash_table)) {
- size *= 2;
+ if (is_nil(ctx->hash_table->term[ctx->entry_index])) {
+ Uint new_size = ctx->hash_table->allocated;
+ if (MUST_GROW(ctx->hash_table)) {
+ new_size *= 2;
}
- hash_table = copy_table(hash_table, size, 0);
- entry_index = lookup(hash_table, key);
- hash_table->num_entries++;
+ TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
+ ctx->hash_table,
+ new_size,
+ ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
+ PUT2_TRAP_LOCATION_NEW_KEY);
+ ctx->entry_index = lookup(ctx->hash_table, ctx->key);
+ ctx->hash_table->num_entries++;
} else {
- Eterm tuple = hash_table->term[entry_index];
+ Eterm tuple = ctx->hash_table->term[ctx->entry_index];
Eterm old_term;
ASSERT(is_tuple_arity(tuple, 2));
old_term = boxed_val(tuple)[2];
- if (EQ(term, old_term)) {
+ if (EQ(ctx->term, old_term)) {
/* Same value. No need to update anything. */
release_update_permission(0);
BIF_RET(am_ok);
} else {
/* Mark the old term for deletion. */
- mark_for_deletion(hash_table, entry_index);
- hash_table = copy_table(hash_table, hash_table->allocated, 0);
+ mark_for_deletion(ctx->hash_table, ctx->entry_index);
+ TRAPPING_COPY_TABLE_PUT(ctx->hash_table,
+ ctx->hash_table,
+ ctx->hash_table->allocated,
+ ERTS_PERSISTENT_TERM_CPY_NO_REHASH,
+ PUT2_TRAP_LOCATION_REPLACE_VALUE);
}
}
- /*
- * Preserve internal sharing in the term by using the
- * sharing-preserving functions. However, literals must
- * be copied in case the module holding them are unloaded.
- */
- INITIALIZE_SHCOPY(info);
- info.copy_literals = 1;
- term_size = copy_shared_calculate(tuple, &info);
- ERTS_INIT_OFF_HEAP(&code_off_heap);
- lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
- literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
- ptr = &literal_area->start[0];
- literal_area->end = ptr + term_size;
- tuple = copy_shared_perform(tuple, term_size, &info, &ptr, &code_off_heap);
- ASSERT(tuple_val(tuple) == literal_area->start);
- literal_area->off_heap = code_off_heap.first;
- DESTROY_SHCOPY(info);
- erts_set_literal_tag(&tuple, literal_area->start, term_size);
- hash_table->term[entry_index] = tuple;
-
- erts_schedule_thr_prgr_later_op(table_updater, hash_table, &thr_prog_op);
- suspend_updater(BIF_P);
-
- /*
- * Issue a warning once if the warning limit has been exceeded.
- */
-
- if (hash_table->num_entries > WARNING_LIMIT && issued_warning == 0) {
- static char w[] =
- "More than " XSTR(WARNING_LIMIT) " persistent terms "
- "have been created.\n"
- "It is recommended to avoid creating an excessive number of\n"
- "persistent terms, as creation and deletion of persistent terms\n"
- "will be slower as the number of persistent terms increases.\n";
- issued_warning = 1;
- erts_send_warning_to_logger_str(BIF_P->group_leader, w);
+ {
+ Uint term_size;
+ Uint lit_area_size;
+ ErlOffHeap code_off_heap;
+ ErtsLiteralArea* literal_area;
+ erts_shcopy_t info;
+ Eterm* ptr;
+ /*
+ * Preserve internal sharing in the term by using the
+ * sharing-preserving functions. However, literals must
+ * be copied in case the module holding them is unloaded.
+ */
+ INITIALIZE_SHCOPY(info);
+ info.copy_literals = 1;
+ term_size = copy_shared_calculate(ctx->tuple, &info);
+ ERTS_INIT_OFF_HEAP(&code_off_heap);
+ lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
+ literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
+ ptr = &literal_area->start[0];
+ literal_area->end = ptr + term_size;
+ ctx->tuple = copy_shared_perform(ctx->tuple, term_size, &info, &ptr, &code_off_heap);
+ ASSERT(tuple_val(ctx->tuple) == literal_area->start);
+ literal_area->off_heap = code_off_heap.first;
+ DESTROY_SHCOPY(info);
+ erts_set_literal_tag(&ctx->tuple, literal_area->start, term_size);
+ ctx->hash_table->term[ctx->entry_index] = ctx->tuple;
+
+ erts_schedule_thr_prgr_later_op(table_updater, ctx->hash_table, &thr_prog_op);
+ suspend_updater(BIF_P);
}
-
+ BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
}
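
The rewrite above follows a common ERTS yielding pattern: heap-allocate the context in a magic binary, hand the caller a magic ref on trap, and rely on the binary's destructor to release half-built state if the process dies before resuming. A simplified, self-contained sketch of that ownership rule (hypothetical types and names, not the actual ERTS API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in: the "magic binary" is modeled as a plain
     * heap context whose destructor must release half-built state. */
    typedef struct {
        int trap_location;   /* where to resume after a trap */
        void *partial_table; /* non-NULL only while a copy is in flight */
    } Ctx;

    static Ctx *ctx_create(void)
    {
        Ctx *ctx = malloc(sizeof(Ctx));
        if (ctx == NULL)
            abort();
        ctx->trap_location = 0;
        ctx->partial_table = NULL; /* the destructor keys off this field */
        return ctx;
    }

    /* Mirrors the role of persistent_term_put_2_ctx_bin_dtor: free
     * only what the interrupted operation left behind. */
    static void ctx_dtor(Ctx *ctx)
    {
        if (ctx->partial_table != NULL)
            free(ctx->partial_table);
        free(ctx);
    }

    int main(void)
    {
        Ctx *ctx = ctx_create();
        ctx->partial_table = malloc(64); /* pretend we trapped mid-copy */
        ctx_dtor(ctx);                   /* owner died: dtor must clean up */
        puts("ok");
        return 0;
    }
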
@@ -349,26 +474,84 @@ BIF_RETTYPE persistent_term_get_2(BIF_ALIST_2)
BIF_RET(result);
}
-BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
+static int persistent_term_erase_1_ctx_bin_dtor(Binary *context_bin)
{
- Eterm key = BIF_ARG_1;
- HashTable* old_table;
- HashTable* new_table;
- Uint entry_index;
- Eterm old_term;
+ ErtsPersistentTermErase1Context* ctx = ERTS_MAGIC_BIN_DATA(context_bin);
+ if (ctx->cpy_ctx.new_table != NULL) {
+ if (ctx->cpy_ctx.copy_type == ERTS_PERSISTENT_TERM_CPY_TEMP) {
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM_TMP, ctx->cpy_ctx.new_table);
+ } else {
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM, ctx->cpy_ctx.new_table);
+ }
+ if (ctx->tmp_table != NULL) {
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM_TMP, ctx->tmp_table);
+ }
+ release_update_permission(0);
+ }
+ return 1;
+}
+BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
+{
+ static const Uint ITERATIONS_PER_RED = 32;
+ ErtsPersistentTermErase1Context* ctx;
+ Eterm state_mref = THE_NON_VALUE;
+ long iterations_until_trap;
+ long max_iterations;
+#ifdef DEBUG
+ (void)ITERATIONS_PER_RED;
+ iterations_until_trap = max_iterations =
+ GET_SMALL_RANDOM_INT(ERTS_BIF_REDS_LEFT(BIF_P) + (Uint)&ctx);
+#else
+ iterations_until_trap = max_iterations =
+ ITERATIONS_PER_RED * ERTS_BIF_REDS_LEFT(BIF_P);
+#endif
+#define ERASE_TRAP_CODE \
+ BIF_TRAP1(bif_export[BIF_persistent_term_erase_1], BIF_P, state_mref);
+#define TRAPPING_COPY_TABLE_ERASE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME) \
+ TRAPPING_COPY_TABLE(TABLE_DEST, OLD_TABLE, NEW_SIZE, REHASH, LOC_NAME, ERASE_TRAP_CODE)
+ if (is_internal_magic_ref(BIF_ARG_1) &&
+ (ERTS_MAGIC_BIN_DESTRUCTOR(erts_magic_ref2bin(BIF_ARG_1)) ==
+ persistent_term_erase_1_ctx_bin_dtor)) {
+ /* Restore the state after a trap */
+ Binary* state_bin;
+ state_mref = BIF_ARG_1;
+ state_bin = erts_magic_ref2bin(state_mref);
+ ctx = ERTS_MAGIC_BIN_DATA(state_bin);
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ erts_set_gc_state(BIF_P, 1);
+ switch (ctx->trap_location) {
+ case ERASE1_TRAP_LOCATION_TMP_COPY:
+ goto L_ERASE1_TRAP_LOCATION_TMP_COPY;
+ case ERASE1_TRAP_LOCATION_FINAL_COPY:
+ goto L_ERASE1_TRAP_LOCATION_FINAL_COPY;
+ }
+ } else {
+ /* Save state in magic bin in case trapping is necessary */
+ Eterm* hp;
+ Binary* state_bin = erts_create_magic_binary(sizeof(ErtsPersistentTermErase1Context),
+ persistent_term_erase_1_ctx_bin_dtor);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ state_mref = erts_mk_magic_ref(&hp, &MSO(BIF_P), state_bin);
+ ctx = ERTS_MAGIC_BIN_DATA(state_bin);
+ /*
+ * IMPORTANT: The following two fields are used to detect if
+ * persistent_term_erase_1_ctx_bin_dtor needs to free memory
+ */
+ ctx->cpy_ctx.new_table = NULL;
+ ctx->tmp_table = NULL;
+ }
if (!try_seize_update_permission(BIF_P)) {
ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
BIF_P, BIF_ARG_1);
}
- old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
- entry_index = lookup(old_table, key);
- old_term = old_table->term[entry_index];
- if (is_boxed(old_term)) {
+ ctx->key = BIF_ARG_1;
+ ctx->old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ ctx->entry_index = lookup(ctx->old_table, ctx->key);
+ ctx->old_term = ctx->old_table->term[ctx->entry_index];
+ if (is_boxed(ctx->old_term)) {
Uint new_size;
- HashTable* tmp_table;
-
/*
* Since we don't use any delete markers, we must rehash
* the table when deleting terms to ensure that all terms
@@ -378,8 +561,12 @@ BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
* temporary table copy of the same size as the old one.
*/
- ASSERT(is_tuple_arity(old_term, 2));
- tmp_table = tmp_table_copy(old_table);
+ ASSERT(is_tuple_arity(ctx->old_term, 2));
+ TRAPPING_COPY_TABLE_ERASE(ctx->tmp_table,
+ ctx->old_table,
+ ctx->old_table->allocated,
+ ERTS_PERSISTENT_TERM_CPY_TEMP,
+ ERASE1_TRAP_LOCATION_TMP_COPY);
/*
* Delete the term from the temporary table. Then copy the
@@ -387,18 +574,28 @@ BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
* while copying.
*/
- tmp_table->term[entry_index] = NIL;
- tmp_table->num_entries--;
- new_size = tmp_table->allocated;
- if (MUST_SHRINK(tmp_table)) {
+ ctx->tmp_table->term[ctx->entry_index] = NIL;
+ ctx->tmp_table->num_entries--;
+ new_size = ctx->tmp_table->allocated;
+ if (MUST_SHRINK(ctx->tmp_table)) {
new_size /= 2;
}
- new_table = copy_table(tmp_table, new_size, 1);
- erts_free(ERTS_ALC_T_TMP, tmp_table);
+ TRAPPING_COPY_TABLE_ERASE(ctx->new_table,
+ ctx->tmp_table,
+ new_size,
+ ERTS_PERSISTENT_TERM_CPY_REHASH,
+ ERASE1_TRAP_LOCATION_FINAL_COPY);
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM_TMP, ctx->tmp_table);
+ /*
+ * IMPORTANT: Memory management depends on ctx->tmp_table being
+ * set to NULL on the line below
+ */
+ ctx->tmp_table = NULL;
- mark_for_deletion(old_table, entry_index);
- erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ mark_for_deletion(ctx->old_table, ctx->entry_index);
+ erts_schedule_thr_prgr_later_op(table_updater, ctx->new_table, &thr_prog_op);
suspend_updater(BIF_P);
+ BUMP_REDS(BIF_P, (max_iterations - iterations_until_trap) / ITERATIONS_PER_RED);
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
}
@@ -406,7 +603,7 @@ BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
* Key is not present. Nothing to do.
*/
- ASSERT(is_nil(old_term));
+ ASSERT(is_nil(ctx->old_term));
release_update_permission(0);
BIF_RET(am_false);
}
@@ -740,65 +937,104 @@ lookup(HashTable* hash_table, Eterm key)
}
static HashTable*
-tmp_table_copy(HashTable* old_table)
+copy_table(ErtsPersistentTermCpyTableCtx* ctx)
{
- Uint size = old_table->allocated;
- HashTable* tmp_table;
+ Uint old_size = ctx->old_table->allocated;
Uint i;
-
- tmp_table = (HashTable *) erts_alloc(ERTS_ALC_T_TMP,
- sizeof(HashTable) +
- sizeof(Eterm) * (size-1));
- *tmp_table = *old_table;
- for (i = 0; i < size; i++) {
- tmp_table->term[i] = old_table->term[i];
+ ErtsAlcType_t alloc_type;
+ ctx->total_iterations_done = 0;
+ switch(ctx->location) {
+ case ERTS_PERSISTENT_TERM_CPY_PLACE_1: goto L_copy_table_place_1;
+ case ERTS_PERSISTENT_TERM_CPY_PLACE_2: goto L_copy_table_place_2;
+ case ERTS_PERSISTENT_TERM_CPY_PLACE_3: goto L_copy_table_place_3;
+ case ERTS_PERSISTENT_TERM_CPY_PLACE_START:
+ ctx->iterations_done = 0;
}
- return tmp_table;
-}
-
-static HashTable*
-copy_table(HashTable* old_table, Uint new_size, int rehash)
-{
- HashTable* new_table;
- Uint old_size = old_table->allocated;
- Uint i;
-
- new_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
- sizeof(HashTable) +
- sizeof(Eterm) * (new_size-1));
- if (old_table->allocated == new_size && !rehash) {
+ if (ctx->copy_type == ERTS_PERSISTENT_TERM_CPY_TEMP) {
+ alloc_type = ERTS_ALC_T_PERSISTENT_TERM_TMP;
+ } else {
+ alloc_type = ERTS_ALC_T_PERSISTENT_TERM;
+ }
+ ctx->new_table = (HashTable *) erts_alloc(alloc_type,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (ctx->new_size-1));
+ if (ctx->old_table->allocated == ctx->new_size &&
+ (ctx->copy_type == ERTS_PERSISTENT_TERM_CPY_NO_REHASH ||
+ ctx->copy_type == ERTS_PERSISTENT_TERM_CPY_TEMP)) {
/*
* Same size and no key deleted. Make an exact copy of the table.
*/
- *new_table = *old_table;
- for (i = 0; i < new_size; i++) {
- new_table->term[i] = old_table->term[i];
+ *ctx->new_table = *ctx->old_table;
+ L_copy_table_place_1:
+ for (i = ctx->iterations_done;
+ i < MIN(ctx->iterations_done + ctx->max_iterations,
+ ctx->new_size);
+ i++) {
+ ctx->new_table->term[i] = ctx->old_table->term[i];
}
+ ctx->total_iterations_done = (i - ctx->iterations_done);
+ if (i < ctx->new_size) {
+ ctx->iterations_done = i;
+ ctx->location = ERTS_PERSISTENT_TERM_CPY_PLACE_1;
+ return NULL;
+ }
+ ctx->iterations_done = 0;
} else {
/*
* The size of the table has changed or an element has been
* deleted. Must rehash, by inserting all old terms into the
* new (empty) table.
*/
- new_table->allocated = new_size;
- new_table->num_entries = old_table->num_entries;
- new_table->mask = new_size - 1;
- for (i = 0; i < new_size; i++) {
- new_table->term[i] = NIL;
+ ctx->new_table->allocated = ctx->new_size;
+ ctx->new_table->num_entries = ctx->old_table->num_entries;
+ ctx->new_table->mask = ctx->new_size - 1;
+ L_copy_table_place_2:
+ for (i = ctx->iterations_done;
+ i < MIN(ctx->iterations_done + ctx->max_iterations,
+ ctx->new_size);
+ i++) {
+ ctx->new_table->term[i] = NIL;
+ }
+ ctx->total_iterations_done = (i - ctx->iterations_done);
+ ctx->max_iterations -= ctx->total_iterations_done;
+ if (i < ctx->new_size) {
+ ctx->iterations_done = i;
+ ctx->location = ERTS_PERSISTENT_TERM_CPY_PLACE_2;
+ return NULL;
}
- for (i = 0; i < old_size; i++) {
- if (is_tuple(old_table->term[i])) {
- Eterm key = tuple_val(old_table->term[i])[1];
- Uint entry_index = lookup(new_table, key);
- ASSERT(is_nil(new_table->term[entry_index]));
- new_table->term[entry_index] = old_table->term[i];
+ ctx->iterations_done = 0;
+ L_copy_table_place_3:
+ for (i = ctx->iterations_done;
+ i < MIN(ctx->iterations_done + ctx->max_iterations,
+ old_size);
+ i++) {
+ if (is_tuple(ctx->old_table->term[i])) {
+ Eterm key = tuple_val(ctx->old_table->term[i])[1];
+ Uint entry_index = lookup(ctx->new_table, key);
+ ASSERT(is_nil(ctx->new_table->term[entry_index]));
+ ctx->new_table->term[entry_index] = ctx->old_table->term[i];
}
}
+ ctx->total_iterations_done += (i - ctx->iterations_done);
+ if (i < old_size) {
+ ctx->iterations_done = i;
+ ctx->location = ERTS_PERSISTENT_TERM_CPY_PLACE_3;
+ return NULL;
+ }
+ ctx->iterations_done = 0;
+ }
+ ctx->new_table->first_to_delete = 0;
+ ctx->new_table->num_to_delete = 0;
+ erts_atomic_init_nob(&ctx->new_table->refc, (erts_aint_t)1);
+ {
+ HashTable* new_table = ctx->new_table;
+ /*
+ * IMPORTANT: Memory management depends on ctx->new_table being
+ * set to NULL on the line below
+ */
+ ctx->new_table = NULL;
+ return new_table;
}
- new_table->first_to_delete = 0;
- new_table->num_to_delete = 0;
- erts_atomic_init_nob(&new_table->refc, (erts_aint_t)1);
- return new_table;
}
static void
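
The reworked copy_table is a resumable loop: it dispatches on a saved location, burns at most max_iterations per call, and returns NULL to signal "trap and come back". A compact standalone sketch of the same trampoline shape (illustrative only; two phases instead of three):

    #include <stdio.h>

    typedef enum { LOC_START, LOC_PHASE_1, LOC_PHASE_2 } Location;

    typedef struct {
        Location loc; /* where to resume */
        long i;       /* loop counter preserved across yields */
    } CopyCtx;

    /* Returns 1 when finished, 0 when the budget ran out; the caller
     * "traps" and calls again with the same ctx (cf. copy_table). */
    static int copy_step(CopyCtx *ctx, long budget)
    {
        switch (ctx->loc) {
        case LOC_PHASE_1: goto phase_1;
        case LOC_PHASE_2: goto phase_2;
        case LOC_START:   break;
        }
        ctx->i = 0;
    phase_1: /* e.g. clearing the new table */
        for (; ctx->i < 1000; ctx->i++) {
            if (budget-- <= 0) { ctx->loc = LOC_PHASE_1; return 0; }
        }
        ctx->i = 0;
    phase_2: /* e.g. rehashing the old entries */
        for (; ctx->i < 1000; ctx->i++) {
            if (budget-- <= 0) { ctx->loc = LOC_PHASE_2; return 0; }
        }
        return 1;
    }

    int main(void)
    {
        CopyCtx ctx = { LOC_START, 0 };
        int traps = 0;
        while (!copy_step(&ctx, 300)) /* 300-iteration budget per call */
            traps++;
        printf("finished after %d traps\n", traps);
        return 0;
    }
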
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index bbc64eb9aa..e0b9202fe7 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -532,10 +532,7 @@ re_compile(Process* p, Eterm arg1, Eterm arg2)
int options = 0;
int pflags = 0;
int unicode = 0;
-#ifdef DEBUG
int buffres;
-#endif
-
if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL,NULL,NULL)
< 0) {
@@ -556,12 +553,8 @@ re_compile(Process* p, Eterm arg1, Eterm arg2)
BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
-#ifdef DEBUG
- buffres =
-#endif
- erts_iolist_to_buf(arg1, expr, slen);
-
- ASSERT(buffres >= 0);
+ buffres = erts_iolist_to_buf(arg1, expr, slen);
+ ASSERT(buffres >= 0); (void)buffres;
expr[slen]='\0';
result = erts_pcre_compile2(expr, options, &errcode,
@@ -1052,9 +1045,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
tmpb[ap->len] = '\0';
} else {
ErlDrvSizeT slen;
-#ifdef DEBUG
int buffres;
-#endif
if (erts_iolist_size(val, &slen)) {
goto error;
@@ -1068,11 +1059,8 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
}
}
-#ifdef DEBUG
- buffres =
-#endif
- erts_iolist_to_buf(val, tmpb, slen);
- ASSERT(buffres >= 0);
+ buffres = erts_iolist_to_buf(val, tmpb, slen);
+ ASSERT(buffres >= 0); (void)buffres;
tmpb[slen] = '\0';
}
build_one_capture(code,&ri,&sallocated,has_dupnames,tmpb);
@@ -1145,9 +1133,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
const char *errstr = "";
int errofset = 0;
int capture_count;
-#ifdef DEBUG
int buffres;
-#endif
if (pflags & PARSE_FLAG_UNICODE &&
(!is_binary(arg2) || !is_binary(arg1) ||
@@ -1161,12 +1147,8 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
-#ifdef DEBUG
- buffres =
-#endif
- erts_iolist_to_buf(arg2, expr, slen);
-
- ASSERT(buffres >= 0);
+ buffres = erts_iolist_to_buf(arg2, expr, slen);
+ ASSERT(buffres >= 0); (void)buffres;
expr[slen]='\0';
result = erts_pcre_compile2(expr, comp_options, &errcode,
@@ -1317,9 +1299,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
restart.subject = (char *) (pb->bytes+offset);
restart.flags |= RESTART_FLAG_SUBJECT_IN_BINARY;
} else {
-#ifdef DEBUG
int buffres;
-#endif
handle_iolist:
if (erts_iolist_size(arg1, &slength)) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
@@ -1331,11 +1311,8 @@ handle_iolist:
}
restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength);
-#ifdef DEBUG
- buffres =
-#endif
- erts_iolist_to_buf(arg1, restart.subject, slength);
- ASSERT(buffres >= 0);
+ buffres = erts_iolist_to_buf(arg1, restart.subject, slength);
+ ASSERT(buffres >= 0); (void)buffres;
}
if (pflags & PARSE_FLAG_REPORT_ERRORS) {
@@ -1457,10 +1434,7 @@ re_inspect_2(BIF_ALIST_2)
Eterm res;
const pcre *code;
byte *temp_alloc = NULL;
-#ifdef DEBUG
- int infores;
-#endif
-
+ int infores;
if (is_not_tuple(BIF_ARG_1) || (arityval(*tuple_val(BIF_ARG_1)) != 5)) {
goto error;
@@ -1484,12 +1458,8 @@ re_inspect_2(BIF_ALIST_2)
if (erts_pcre_fullinfo(code, NULL, PCRE_INFO_OPTIONS, &options) != 0)
goto error;
-#ifdef DEBUG
- infores =
-#endif
- erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top);
-
- ASSERT(infores == 0);
+ infores = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top);
+ ASSERT(infores == 0); (void)infores;
if (top <= 0) {
hp = HAlloc(BIF_P, 3);
@@ -1497,18 +1467,10 @@ re_inspect_2(BIF_ALIST_2)
erts_free_aligned_binary_bytes(temp_alloc);
BIF_RET(res);
}
-#ifdef DEBUG
- infores =
-#endif
- erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize);
-
+ infores = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize);
ASSERT(infores == 0);
-#ifdef DEBUG
- infores =
-#endif
- erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable);
-
+ infores = erts_pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable);
ASSERT(infores == 0);
has_dupnames = ((options & PCRE_DUPNAMES) != 0);
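
The recurring change in this file replaces #ifdef DEBUG scaffolding around the result variable with an unconditional assignment followed by (void)var, which keeps the call itself identical in all builds and silences the unused-variable warning once ASSERT compiles away. A minimal sketch of the idiom using the standard assert macro (generic C, not the ERTS headers):

    #include <assert.h>
    #include <stdio.h>

    /* Stand-in for erts_iolist_to_buf: returns >= 0 on success. */
    static int fill_buffer(char *buf, int len)
    {
        for (int i = 0; i < len; i++)
            buf[i] = 'x';
        return len;
    }

    int main(void)
    {
        char buf[8];
        int res = fill_buffer(buf, (int)sizeof buf);
        /* In release builds assert() expands to nothing, so res would
         * be set but unused; (void)res silences that warning without
         * hiding the call behind #ifdef DEBUG. */
        assert(res >= 0); (void)res;
        printf("%.*s\n", (int)sizeof buf, buf);
        return 0;
    }
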
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 08edb43c49..c9c047255a 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -278,7 +278,6 @@ Eterm erts_bin_bytes_to_list(Eterm previous, Eterm* hp, byte* bytes, Uint size,
*/
BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg, Export *bif);
-BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple);
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
@@ -315,6 +314,7 @@ ERTS_GLB_INLINE Binary *erts_bin_drv_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
+ERTS_GLB_INLINE void erts_magic_binary_free(Binary *bp);
ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
ERTS_GLB_INLINE void erts_bin_release(Binary *bp);
ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size,
@@ -447,6 +447,13 @@ erts_bin_realloc(Binary *bp, Uint size)
}
ERTS_GLB_INLINE void
+erts_magic_binary_free(Binary *bp)
+{
+ erts_magic_ref_remove_bin(ERTS_MAGIC_BIN_REFN(bp));
+ erts_free(ERTS_MAGIC_BIN_ATYPE(bp), (void *) bp);
+}
+
+ERTS_GLB_INLINE void
erts_bin_free(Binary *bp)
{
if (bp->intern.flags & BIN_FLAG_MAGIC) {
@@ -454,8 +461,7 @@ erts_bin_free(Binary *bp)
/* Destructor took control of the deallocation */
return;
}
- erts_magic_ref_remove_bin(ERTS_MAGIC_BIN_REFN(bp));
- erts_free(ERTS_MAGIC_BIN_ATYPE(bp), (void *) bp);
+ erts_magic_binary_free(bp);
}
else if (bp->intern.flags & BIN_FLAG_DRV)
erts_free(ERTS_ALC_T_DRV_BINARY, (void *) bp);
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 3a16913473..f5807d25d7 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -144,6 +144,42 @@ erts_bs_start_match_2(Process *p, Eterm Binary, Uint Max)
return make_matchstate(ms);
}
+ErlBinMatchState *erts_bs_start_match_3(Process *p, Eterm Binary)
+{
+ Eterm Orig;
+ Uint offs;
+ Uint* hp;
+ Uint NeededSize;
+ ErlBinMatchState *ms;
+ Uint bitoffs;
+ Uint bitsize;
+ Uint total_bin_size;
+ ProcBin* pb;
+
+ ASSERT(is_binary(Binary));
+ total_bin_size = binary_size(Binary);
+ if ((total_bin_size >> (8*sizeof(Uint)-3)) != 0) {
+ return NULL;
+ }
+
+ NeededSize = ERL_BIN_MATCHSTATE_SIZE(0);
+ hp = HeapOnlyAlloc(p, NeededSize);
+ ms = (ErlBinMatchState *) hp;
+ ERTS_GET_REAL_BIN(Binary, Orig, offs, bitoffs, bitsize);
+ pb = (ProcBin *) boxed_val(Orig);
+ if (pb->thing_word == HEADER_PROC_BIN && pb->flags != 0) {
+ erts_emasculate_writable_binary(pb);
+ }
+
+ ms->thing_word = HEADER_BIN_MATCHSTATE(0);
+ (ms->mb).orig = Orig;
+ (ms->mb).base = binary_bytes(Orig);
+ (ms->mb).offset = 8 * offs + bitoffs;
+ (ms->mb).size = total_bin_size * 8 + (ms->mb).offset + bitsize;
+
+ return ms;
+}
+
#ifdef DEBUG
# define CHECK_MATCH_BUFFER(MB) check_match_buffer(MB)
@@ -1295,6 +1331,14 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
}
}
+ if (build_size_in_bits == 0) {
+ if (c_p->stop - c_p->htop < extra_words) {
+ (void) erts_garbage_collect(c_p, extra_words, reg, live+1);
+ bin = reg[live];
+ }
+ return bin;
+ }
+
if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
c_p->freason = SYSTEM_LIMIT;
return THE_NON_VALUE;
@@ -1352,13 +1396,13 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
Uint bitsize;
Eterm* hp;
- /*
+ /*
* Allocate heap space.
*/
heap_need = PROC_BIN_SIZE + ERL_SUB_BIN_SIZE + extra_words;
if (c_p->stop - c_p->htop < heap_need) {
(void) erts_garbage_collect(c_p, heap_need, reg, live+1);
- bin = reg[live];
+ bin = reg[live];
}
hp = c_p->htop;
@@ -1376,6 +1420,10 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
}
}
+ if (build_size_in_bits == 0) {
+ return bin;
+ }
+
if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
c_p->freason = SYSTEM_LIMIT;
return THE_NON_VALUE;
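
Both new zero-size early exits sit directly in front of the same guard: (ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset rejects an append whose offset plus size would wrap an unsigned word. A standalone sketch of that overflow check (plain C, with ULONG_MAX standing in for ERTS_UINT_MAX):

    #include <stdio.h>
    #include <limits.h>

    /* Returns 0 if offset + size_in_bits would overflow an unsigned
     * long, mirroring the SYSTEM_LIMIT check in erts_bs_append. */
    static int fits(unsigned long offset, unsigned long size_in_bits)
    {
        if ((ULONG_MAX - size_in_bits) < offset)
            return 0; /* offset + size_in_bits > ULONG_MAX: reject */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", fits(16, 32));            /* 1: fits */
        printf("%d\n", fits(ULONG_MAX - 8, 16)); /* 0: would wrap */
        return 0;
    }
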
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 7beef5cfda..50d353e1fa 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -73,12 +73,16 @@ struct erl_bits_state {
typedef struct erl_bin_match_struct{
Eterm thing_word;
ErlBinMatchBuffer mb; /* Present match buffer */
- Eterm save_offset[1]; /* Saved offsets */
+ Eterm save_offset[1]; /* Saved offsets, only valid for contexts
+ * created through bs_start_match2. */
} ErlBinMatchState;
-#define ERL_BIN_MATCHSTATE_SIZE(_Max) ((sizeof(ErlBinMatchState) + (_Max)*sizeof(Eterm))/sizeof(Eterm))
-#define HEADER_BIN_MATCHSTATE(_Max) _make_header(ERL_BIN_MATCHSTATE_SIZE((_Max))-1, _TAG_HEADER_BIN_MATCHSTATE)
-#define HEADER_NUM_SLOTS(hdr) (header_arity(hdr)-sizeof(ErlBinMatchState)/sizeof(Eterm)+1)
+#define ERL_BIN_MATCHSTATE_SIZE(_Max) \
+ ((offsetof(ErlBinMatchState, save_offset) + (_Max)*sizeof(Eterm))/sizeof(Eterm))
+#define HEADER_BIN_MATCHSTATE(_Max) \
+ _make_header(ERL_BIN_MATCHSTATE_SIZE((_Max)) - 1, _TAG_HEADER_BIN_MATCHSTATE)
+#define HEADER_NUM_SLOTS(hdr) \
+ (header_arity(hdr) - (offsetof(ErlBinMatchState, save_offset) / sizeof(Eterm)) + 1)
#define make_matchstate(_Ms) make_boxed((Eterm*)(_Ms))
#define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb)
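
The rewritten macros size the match state from offsetof(ErlBinMatchState, save_offset) instead of sizeof(ErlBinMatchState), so the declared one-element save_offset array is no longer counted when a context has zero slots, which is exactly what the new erts_bs_start_match_3 allocates. A standalone sketch of the difference (illustrative struct; assumes no trailing padding, which holds here since every member is an Eterm):

    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long Eterm;

    typedef struct {
        Eterm thing_word;
        Eterm mb[4];          /* stand-in for the embedded match buffer */
        Eterm save_offset[1]; /* trailing array, as in ErlBinMatchState */
    } MatchState;

    /* Old style: sizeof counts the declared one-element array even
     * when no save slots are wanted. */
    #define SIZE_OLD(max) \
        ((sizeof(MatchState) + (max) * sizeof(Eterm)) / sizeof(Eterm))

    /* New style: slots begin at save_offset, so zero slots cost zero. */
    #define SIZE_NEW(max) \
        ((offsetof(MatchState, save_offset) + (max) * sizeof(Eterm)) \
         / sizeof(Eterm))

    int main(void)
    {
        printf("old, 0 slots: %zu words\n", SIZE_OLD(0)); /* 6 */
        printf("new, 0 slots: %zu words\n", SIZE_NEW(0)); /* 5 */
        return 0;
    }
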
@@ -144,6 +148,7 @@ void erts_bits_destroy_state(ERL_BITS_PROTO_0);
*/
Eterm erts_bs_start_match_2(Process *p, Eterm Bin, Uint Max);
+ErlBinMatchState *erts_bs_start_match_3(Process *p, Eterm Bin);
Eterm erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
Eterm erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
Eterm erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 6f8d2f8c35..6a4f43297e 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -34,6 +34,7 @@
#include "error.h"
#include "bif.h"
#include "erl_cpu_topology.h"
+#include "erl_flxctr.h"
#define ERTS_MAX_READER_GROUPS 64
@@ -58,6 +59,7 @@ static erts_cpu_info_t *cpuinfo;
static int max_main_threads;
static int reader_groups;
+static int decentralized_counter_groups;
static ErtsCpuBindData *scheduler2cpu_map;
static erts_rwmtx_t cpuinfo_rwmtx;
@@ -127,6 +129,8 @@ static erts_cpu_groups_map_t *cpu_groups_maps;
static erts_cpu_groups_map_t *reader_groups_map;
+static erts_cpu_groups_map_t *decentralized_counter_groups_map;
+
#define ERTS_TOPOLOGY_CG ERTS_TOPOLOGY_MAX_DEPTH
#define ERTS_MAX_CPU_TOPOLOGY_ID ((int) 0xffff)
@@ -138,6 +142,7 @@ static void cpu_bind_order_sort(erts_cpu_topology_t *cpudata,
static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size);
static void reader_groups_callback(int, ErtsSchedulerData *, int, void *);
+static void flxctr_groups_callback(int, ErtsSchedulerData *, int, void *);
static erts_cpu_groups_map_t *add_cpu_groups(int groups,
erts_cpu_groups_callback_t callback,
void *arg);
@@ -1646,7 +1651,8 @@ erts_get_logical_processors(int *conf, int *onln, int *avail)
}
void
-erts_pre_early_init_cpu_topology(int *max_rg_p,
+erts_pre_early_init_cpu_topology(int *max_dcg_p,
+ int *max_rg_p,
int *conf_p,
int *onln_p,
int *avail_p)
@@ -1654,6 +1660,7 @@ erts_pre_early_init_cpu_topology(int *max_rg_p,
cpu_groups_maps = NULL;
no_cpu_groups_callbacks = 0;
*max_rg_p = ERTS_MAX_READER_GROUPS;
+ *max_dcg_p = ERTS_MAX_FLXCTR_GROUPS;
cpuinfo = erts_cpu_info_create();
get_logical_processors(conf_p, onln_p, avail_p);
}
@@ -1662,7 +1669,9 @@ void
erts_early_init_cpu_topology(int no_schedulers,
int *max_main_threads_p,
int max_reader_groups,
- int *reader_groups_p)
+ int *reader_groups_p,
+ int max_decentralized_counter_groups,
+ int *decentralized_counter_groups_p)
{
user_cpudata = NULL;
user_cpudata_size = 0;
@@ -1687,6 +1696,12 @@ erts_early_init_cpu_topology(int no_schedulers,
max_main_threads = no_schedulers;
*max_main_threads_p = max_main_threads;
+ decentralized_counter_groups = max_main_threads;
+ if (decentralized_counter_groups <= 1 || max_decentralized_counter_groups <= 1)
+ decentralized_counter_groups = 1;
+ if (decentralized_counter_groups > max_decentralized_counter_groups)
+ decentralized_counter_groups = max_decentralized_counter_groups;
+ *decentralized_counter_groups_p = decentralized_counter_groups;
reader_groups = max_main_threads;
if (reader_groups <= 1 || max_reader_groups <= 1)
reader_groups = 0;
@@ -1718,6 +1733,9 @@ erts_init_cpu_topology(void)
reader_groups_map = add_cpu_groups(reader_groups,
reader_groups_callback,
NULL);
+ decentralized_counter_groups_map = add_cpu_groups(decentralized_counter_groups,
+ flxctr_groups_callback,
+ NULL);
if (cpu_bind_order == ERTS_CPU_BIND_NONE)
erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
@@ -1789,6 +1807,15 @@ reader_groups_callback(int suspending,
erts_rwmtx_set_reader_group(suspending ? 0 : group+1);
}
+void
+flxctr_groups_callback(int suspending,
+ ErtsSchedulerData *esdp,
+ int group,
+ void *unused)
+{
+ erts_flxctr_set_slot(suspending ? 0 : group+1);
+}
+
static Eterm get_cpu_groups_map(Process *c_p,
erts_cpu_groups_map_t *map,
int offset);
@@ -1821,6 +1848,16 @@ erts_get_reader_groups_map(Process *c_p)
return res;
}
+Eterm
+erts_get_decentralized_counter_groups_map(Process *c_p)
+{
+ Eterm res;
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
+ res = get_cpu_groups_map(c_p, decentralized_counter_groups_map, 1);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
+ return res;
+}
+
/*
* CPU groups
*/
diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h
index 88bcad79ab..4a428d7972 100644
--- a/erts/emulator/beam/erl_cpu_topology.h
+++ b/erts/emulator/beam/erl_cpu_topology.h
@@ -27,14 +27,19 @@
#ifndef ERL_CPU_TOPOLOGY_H__
#define ERL_CPU_TOPOLOGY_H__
-void erts_pre_early_init_cpu_topology(int *max_rg_p,
- int *conf_p,
- int *onln_p,
- int *avail_p);
-void erts_early_init_cpu_topology(int no_schedulers,
- int *max_main_threads_p,
- int max_reader_groups,
- int *reader_groups_p);
+void
+erts_pre_early_init_cpu_topology(int *max_dcg_p,
+ int *max_rg_p,
+ int *conf_p,
+ int *onln_p,
+ int *avail_p);
+void
+erts_early_init_cpu_topology(int no_schedulers,
+ int *max_main_threads_p,
+ int max_reader_groups,
+ int *reader_groups_p,
+ int max_decentralized_counter_groups,
+ int *decentralized_counter_groups_p);
void erts_init_cpu_topology(void);
@@ -70,6 +75,7 @@ Eterm erts_bind_schedulers(Process *c_p, Eterm how);
Eterm erts_get_schedulers_binds(Process *c_p);
Eterm erts_get_reader_groups_map(Process *c_p);
+Eterm erts_get_decentralized_counter_groups_map(Process *c_p);
Eterm erts_set_cpu_topology(Process *c_p, Eterm term);
Eterm erts_get_cpu_topology_term(Process *c_p, Eterm which);
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index c009a3bde8..d24f30f126 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -42,6 +42,7 @@
#include "bif.h"
#include "big.h"
#include "erl_binary.h"
+#include "bif.h"
erts_atomic_t erts_ets_misc_mem_size;
@@ -64,6 +65,11 @@ do { \
} \
}while(0)
+#define DB_GET_APPROX_NITEMS(DB) \
+ erts_flxctr_read_approx(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+#define DB_GET_APPROX_MEM_CONSUMED(DB) \
+ erts_flxctr_read_approx(&(DB)->common.counters, ERTS_DB_TABLE_MEM_COUNTER_ID)
+
static BIF_RETTYPE db_bif_fail(Process* p, Uint freason,
Uint bif_ix, Export* bif_exp)
{
@@ -81,16 +87,9 @@ static BIF_RETTYPE db_bif_fail(Process* p, Uint freason,
/* Get a key from any table structure and a tagged object */
#define TERM_GETKEY(tb, obj) db_getkey((tb)->common.keypos, (obj))
-
-/* How safe are we from double-hits or missed objects
-** when iterating without fixation? */
-enum DbIterSafety {
- ITER_UNSAFE, /* Must fixate to be safe */
- ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */
- ITER_SAFE /* No need to fixate at all */
-};
# define ITERATION_SAFETY(Proc,Tab) \
- ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
+ ((IS_TREE_TABLE((Tab)->common.status) || IS_CATREE_TABLE((Tab)->common.status) \
+ || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
: (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
@@ -194,9 +193,6 @@ static int fixed_tabs_find(DbFixation* first, DbFixation* fix)
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_FOREACH
#define ERTS_RBT_WANT_FOREACH_DESTROY
-#ifdef DEBUG
-# define ERTS_RBT_WANT_LOOKUP
-#endif
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
@@ -359,6 +355,7 @@ typedef enum {
extern DbTableMethod db_hash;
extern DbTableMethod db_tree;
+extern DbTableMethod db_catree;
int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
@@ -407,21 +404,19 @@ static void
free_dbtable(void *vtb)
{
DbTable *tb = (DbTable *) vtb;
-#ifdef HARDDEBUG
- if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
- erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
- erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
- tb->common.fixations);
- }
-#endif
- erts_rwmtx_destroy(&tb->common.rwlock);
- erts_mtx_destroy(&tb->common.fixlock);
- ASSERT(is_immed(tb->common.heir_data));
+ ASSERT(erts_flxctr_is_snapshot_ongoing(&tb->common.counters) ||
+ sizeof(DbTable) == erts_flxctr_read_approx(&tb->common.counters,
+ ERTS_DB_TABLE_MEM_COUNTER_ID));
- if (tb->common.btid)
- erts_bin_release(tb->common.btid);
+ erts_rwmtx_destroy(&tb->common.rwlock);
+ erts_mtx_destroy(&tb->common.fixlock);
+ ASSERT(is_immed(tb->common.heir_data));
- erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
+ if (tb->common.btid)
+ erts_bin_release(tb->common.btid);
+
+ erts_flxctr_destroy(&tb->common.counters, ERTS_ALC_T_DB_TABLE);
+ erts_free(ERTS_ALC_T_DB_TABLE, tb);
}
static void schedule_free_dbtable(DbTable* tb)
@@ -1076,7 +1071,7 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_element_3);
UseTmpHeap(2,BIF_P);
- if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) {
goto bail_out;
}
if (is_tuple(BIF_ARG_3)) {
@@ -1165,7 +1160,7 @@ do_update_counter(Process *p, DbTable* tb,
UseTmpHeap(5, p);
- if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET | DB_CA_ORDERED_SET))) {
goto bail_out;
}
if (is_integer(arg3)) { /* Incr */
@@ -1647,15 +1642,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
val = CAR(list_val(list));
if (val == am_bag) {
status |= DB_BAG;
- status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET);
+ status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET);
}
else if (val == am_duplicate_bag) {
status |= DB_DUPLICATE_BAG;
- status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET);
+ status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET | DB_CA_ORDERED_SET);
}
else if (val == am_ordered_set) {
status |= DB_ORDERED_SET;
- status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG);
+ status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_CA_ORDERED_SET);
}
else if (is_tuple(val)) {
Eterm *tp = tuple_val(val);
@@ -1716,7 +1711,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
if (is_not_nil(list)) { /* bad opt or not a well formed list */
BIF_ERROR(BIF_P, BADARG);
}
- if (IS_HASH_TABLE(status)) {
+ if (IS_TREE_TABLE(status) && is_fine_locked && !(status & DB_PRIVATE)) {
+ meth = &db_catree;
+ status |= DB_CA_ORDERED_SET;
+ status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG | DB_ORDERED_SET);
+ status |= DB_FINE_LOCKED;
+ }
+ else if (IS_HASH_TABLE(status)) {
meth = &db_hash;
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
@@ -1738,12 +1739,16 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
*/
{
DbTable init_tb;
-
- erts_atomic_init_nob(&init_tb.common.memory_size, 0);
+ erts_flxctr_init(&init_tb.common.counters, 0, 2, ERTS_ALC_T_DB_TABLE);
tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb, sizeof(DbTable));
- erts_atomic_init_nob(&tb->common.memory_size,
- erts_atomic_read_nob(&init_tb.common.memory_size));
+ erts_flxctr_init(&tb->common.counters,
+ status & DB_CA_ORDERED_SET,
+ 2,
+ ERTS_ALC_T_DB_TABLE);
+ erts_flxctr_add(&tb->common.counters,
+ ERTS_DB_TABLE_MEM_COUNTER_ID,
+ DB_GET_APPROX_MEM_CONSUMED(&init_tb));
}
tb->common.meth = meth;
@@ -1757,8 +1762,6 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
- erts_atomic_init_nob(&tb->common.nitems, 0);
-
tb->common.fixing_procs = NULL;
tb->common.compress = is_compressed;
#ifdef ETS_DBG_FORCE_TRAP
@@ -2135,19 +2138,18 @@ BIF_RETTYPE ets_internal_delete_all_2(BIF_ALIST_2)
{
SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
SWord reds = initial_reds;
- Eterm nitems;
+ Eterm nitems_holder = THE_NON_VALUE;
DbTable* tb;
-
CHECK_TABLES();
DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_internal_delete_all_2);
if (BIF_ARG_2 == am_undefined) {
- nitems = erts_make_integer(erts_atomic_read_nob(&tb->common.nitems),
- BIF_P);
-
- reds = tb->common.meth->db_delete_all_objects(BIF_P, tb, reds);
-
+ reds = tb->common.meth->db_delete_all_objects(BIF_P,
+ tb,
+ reds,
+ &nitems_holder);
+ ASSERT(nitems_holder != THE_NON_VALUE);
ASSERT(!(tb->common.status & DB_BUSY));
if (reds < 0) {
@@ -2166,7 +2168,7 @@ BIF_RETTYPE ets_internal_delete_all_2(BIF_ALIST_2)
db_unlock(tb, LCK_WRITE);
BUMP_ALL_REDS(BIF_P);
BIF_TRAP2(bif_export[BIF_ets_internal_delete_all_2], BIF_P,
- BIF_ARG_1, nitems);
+ BIF_ARG_1, nitems_holder);
}
else {
/* Done, no trapping needed */
@@ -2176,15 +2178,19 @@ BIF_RETTYPE ets_internal_delete_all_2(BIF_ALIST_2)
}
else {
/*
- * The table lookup succeeded and second argument is nitems
+ * The table lookup succeeded and the second argument is nitems_holder
* and not 'undefined', which means we have trapped at least once
* and are now done.
*/
- nitems = BIF_ARG_2;
+ nitems_holder = BIF_ARG_2;
}
-
db_unlock(tb, LCK_WRITE);
+ {
+ Eterm nitems =
+ tb->common.meth->db_delete_all_objects_get_nitems_from_holder(BIF_P,
+ nitems_holder);
BIF_RET(nitems);
+ }
}
static void delete_all_objects_continue(Process* p, DbTable* tb)
@@ -2197,7 +2203,7 @@ static void delete_all_objects_continue(Process* p, DbTable* tb)
if ((tb->common.status & (DB_DELETE|DB_BUSY)) != DB_BUSY)
return;
- reds = tb->common.meth->db_delete_all_objects(p, tb, reds);
+ reds = tb->common.meth->db_delete_all_objects(p, tb, reds, NULL);
if (reds < 0) {
BUMP_ALL_REDS(p);
@@ -2283,6 +2289,7 @@ static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1)
Eterm ret;
Eterm *tptr;
db_lock_kind_t kind = LCK_WRITE_REC;
+ enum DbIterSafety safety = ITER_SAFE;
CHECK_TABLES();
ASSERT(is_tuple(a1));
@@ -2292,10 +2299,11 @@ static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1)
DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
&ets_select_delete_continue_exp);
- cret = tb->common.meth->db_select_delete_continue(p,tb,a1,&ret);
+ cret = tb->common.meth->db_select_delete_continue(p,tb,a1,&ret,&safety);
- if(!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
- unfix_table_locked(p, tb, &kind);
+ if(!DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ ASSERT(erts_refc_read(&tb->common.fix_count,1));
+ unfix_table_locked(p, tb, &kind);
}
db_unlock(tb, kind);
@@ -2333,7 +2341,8 @@ BIF_RETTYPE ets_internal_select_delete_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_1, BIF_ARG_2,
+ &ret, safety);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P,tb);
@@ -2725,7 +2734,7 @@ ets_select3(Process* p, DbTable* tb, Eterm tid, Eterm ms, Sint chunk_size)
cret = tb->common.meth->db_select_chunk(p, tb, tid,
ms, chunk_size,
0 /* not reversed */,
- &ret);
+ &ret, safety);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
}
@@ -2752,7 +2761,8 @@ ets_select3(Process* p, DbTable* tb, Eterm tid, Eterm ms, Sint chunk_size)
}
-/* We get here instead of in the real BIF when trapping */
+/* Trap here from: ets_select_1/2/3
+ */
static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
{
Process *p = BIF_P;
@@ -2763,6 +2773,7 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
Eterm ret;
Eterm *tptr;
db_lock_kind_t kind = LCK_READ;
+ enum DbIterSafety safety = ITER_SAFE;
CHECK_TABLES();
@@ -2772,11 +2783,13 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
&ets_select_continue_exp);
- cret = tb->common.meth->db_select_continue(p, tb, a1,
- &ret);
+ cret = tb->common.meth->db_select_continue(p, tb, a1, &ret, &safety);
- if (!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
- unfix_table_locked(p, tb, &kind);
+ if (!DID_TRAP(p,ret)) {
+ if (safety != ITER_SAFE) {
+ ASSERT(erts_refc_read(&tb->common.fix_count,1));
+ unfix_table_locked(p, tb, &kind);
+ }
}
db_unlock(tb, kind);
@@ -2801,8 +2814,12 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
BIF_RETTYPE ets_select_1(BIF_ALIST_1)
{
return ets_select1(BIF_P, BIF_ets_select_1, BIF_ARG_1);
+ /* TRAP: ets_select_trap_1 */
}
+/*
+ * Common impl for select/1, select_reverse/1, match/1 and match_object/1
+ */
static BIF_RETTYPE ets_select1(Process *p, int bif_ix, Eterm arg1)
{
BIF_RETTYPE result;
@@ -2810,7 +2827,7 @@ static BIF_RETTYPE ets_select1(Process *p, int bif_ix, Eterm arg1)
int cret;
Eterm ret;
Eterm *tptr;
- enum DbIterSafety safety;
+ enum DbIterSafety safety, safety_copy;
CHECK_TABLES();
@@ -2835,7 +2852,8 @@ static BIF_RETTYPE ets_select1(Process *p, int bif_ix, Eterm arg1)
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_continue(p,tb, arg1, &ret);
+ safety_copy = safety;
+ cret = tb->common.meth->db_select_continue(p,tb, arg1, &ret, &safety_copy);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2867,6 +2885,7 @@ BIF_RETTYPE ets_select_2(BIF_ALIST_2)
DbTable* tb;
DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_2);
return ets_select2(BIF_P, tb, BIF_ARG_1, BIF_ARG_2);
+ /* TRAP: ets_select_trap_1 */
}
static BIF_RETTYPE
@@ -2884,7 +2903,7 @@ ets_select2(Process* p, DbTable* tb, Eterm tid, Eterm ms)
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(p, tb, tid, ms, 0, &ret);
+ cret = tb->common.meth->db_select(p, tb, tid, ms, 0, &ret, safety);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2922,6 +2941,7 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
Eterm ret;
Eterm *tptr;
db_lock_kind_t kind = LCK_READ;
+ enum DbIterSafety safety = ITER_SAFE;
CHECK_TABLES();
@@ -2931,9 +2951,10 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
&ets_select_count_continue_exp);
- cret = tb->common.meth->db_select_count_continue(p, tb, a1, &ret);
+ cret = tb->common.meth->db_select_count_continue(p, tb, a1, &ret, &safety);
- if (!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ if (!DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ ASSERT(erts_refc_read(&tb->common.fix_count,1));
unfix_table_locked(p, tb, &kind);
}
db_unlock(tb, kind);
@@ -2971,7 +2992,8 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_count(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_count(BIF_P,tb, BIF_ARG_1, BIF_ARG_2,
+ &ret, safety);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P, tb);
@@ -3010,6 +3032,7 @@ static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1)
Eterm ret;
Eterm *tptr;
db_lock_kind_t kind = LCK_WRITE_REC;
+ enum DbIterSafety safety = ITER_SAFE;
CHECK_TABLES();
ASSERT(is_tuple(a1));
@@ -3019,9 +3042,10 @@ static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1)
DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
&ets_select_replace_continue_exp);
- cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret);
+ cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret,&safety);
- if(!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ if(!DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ ASSERT(erts_refc_read(&tb->common.fix_count,1));
unfix_table_locked(p, tb, &kind);
}
@@ -3064,7 +3088,8 @@ BIF_RETTYPE ets_select_replace_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_replace(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_replace(BIF_P, tb, BIF_ARG_1, BIF_ARG_2,
+ &ret, safety);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P,tb);
@@ -3116,7 +3141,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
}
cret = tb->common.meth->db_select_chunk(BIF_P,tb, BIF_ARG_1,
BIF_ARG_2, chunk_size,
- 1 /* reversed */, &ret);
+ 1 /* reversed */, &ret, safety);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P, tb);
}
@@ -3161,7 +3186,7 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
local_fix_table(tb);
}
cret = tb->common.meth->db_select(BIF_P,tb, BIF_ARG_1, BIF_ARG_2,
- 1 /*reversed*/, &ret);
+ 1 /*reversed*/, &ret, safety);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P, tb);
@@ -3265,13 +3290,29 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
int i;
Eterm* hp;
Uint freason;
+ Sint size = -1;
+ Sint memory = -1;
+ Eterm table;
+ int is_ctrs_read_result_set = 0;
/*Process* rp = NULL;*/
/* If/when we implement lockless private tables:
Eterm owner;
*/
-
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
- if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
+ if(is_tuple(BIF_ARG_1) &&
+ is_tuple_arity(BIF_ARG_1, 2) &&
+ erts_flxctr_is_snapshot_result(tuple_val(BIF_ARG_1)[1])) {
+ Eterm counter_read_result = tuple_val(BIF_ARG_1)[1];
+ table = tuple_val(BIF_ARG_1)[2];
+ size = erts_flxctr_get_snapshot_result_after_trap(counter_read_result,
+ ERTS_DB_TABLE_NITEMS_COUNTER_ID);
+ memory = erts_flxctr_get_snapshot_result_after_trap(counter_read_result,
+ ERTS_DB_TABLE_MEM_COUNTER_ID);
+ is_ctrs_read_result_set = 1;
+ } else {
+ table = BIF_ARG_1;
+ }
+ if ((tb = db_get_table(BIF_P, table, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG && (is_atom(table) || is_ref(table)))
BIF_RET(am_undefined);
else
return db_bif_fail(BIF_P, freason, BIF_ets_info_1, NULL);
@@ -3302,9 +3343,35 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
}*/
+
+ if (!is_ctrs_read_result_set) {
+ ErtsFlxCtrSnapshotResult res =
+ erts_flxctr_snapshot(&tb->common.counters, ERTS_ALC_T_DB_TABLE, BIF_P);
+ if (ERTS_FLXCTR_GET_RESULT_AFTER_TRAP == res.type) {
+ Eterm tuple;
+ db_unlock(tb, LCK_READ);
+ hp = HAlloc(BIF_P, 3);
+ tuple = TUPLE2(hp, res.trap_resume_state, table);
+ BIF_TRAP1(bif_export[BIF_ets_info_1], BIF_P, tuple);
+ } else if (res.type == ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP) {
+ db_unlock(tb, LCK_READ);
+ BIF_TRAP1(bif_export[BIF_ets_info_1], BIF_P, table);
+ } else {
+ size = res.result[ERTS_DB_TABLE_NITEMS_COUNTER_ID];
+ memory = res.result[ERTS_DB_TABLE_MEM_COUNTER_ID];
+ is_ctrs_read_result_set = 1;
+ }
+ }
for (i = 0; i < sizeof(fields)/sizeof(Eterm); i++) {
- results[i] = table_info(BIF_P, tb, fields[i]);
- ASSERT(is_value(results[i]));
+ if (is_ctrs_read_result_set && am_size == fields[i]) {
+ results[i] = erts_make_integer(size, BIF_P);
+ } else if (is_ctrs_read_result_set && am_memory == fields[i]) {
+ Sint words = (Sint) ((memory + sizeof(Sint) - 1) / sizeof(Sint));
+ results[i] = erts_make_integer(words, BIF_P);
+ } else {
+ results[i] = table_info(BIF_P, tb, fields[i]);
+ ASSERT(is_value(results[i]));
+ }
}
db_unlock(tb, LCK_READ);
@@ -3332,14 +3399,43 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
DbTable* tb;
Eterm ret = THE_NON_VALUE;
Uint freason;
-
+ if (erts_flxctr_is_snapshot_result(BIF_ARG_1)) {
+ Sint res;
+ if (am_memory == BIF_ARG_2) {
+ res = erts_flxctr_get_snapshot_result_after_trap(BIF_ARG_1,
+ ERTS_DB_TABLE_MEM_COUNTER_ID);
+ res = (Sint) ((res + sizeof(Sint) - 1) / sizeof(Sint));
+ } else {
+ res = erts_flxctr_get_snapshot_result_after_trap(BIF_ARG_1,
+ ERTS_DB_TABLE_NITEMS_COUNTER_ID);
+ }
+ BIF_RET(erts_make_integer(res, BIF_P));
+ }
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
BIF_RET(am_undefined);
else
return db_bif_fail(BIF_P, freason, BIF_ets_info_2, NULL);
}
- ret = table_info(BIF_P, tb, BIF_ARG_2);
+ if (BIF_ARG_2 == am_size || BIF_ARG_2 == am_memory) {
+ ErtsFlxCtrSnapshotResult res =
+ erts_flxctr_snapshot(&tb->common.counters, ERTS_ALC_T_DB_TABLE, BIF_P);
+ if (ERTS_FLXCTR_GET_RESULT_AFTER_TRAP == res.type) {
+ db_unlock(tb, LCK_READ);
+ BIF_TRAP2(bif_export[BIF_ets_info_2], BIF_P, res.trap_resume_state, BIF_ARG_2);
+ } else if (res.type == ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP) {
+ db_unlock(tb, LCK_READ);
+ BIF_TRAP2(bif_export[BIF_ets_info_2], BIF_P, BIF_ARG_1, BIF_ARG_2);
+ } else if (BIF_ARG_2 == am_size) {
+ ret = erts_make_integer(res.result[ERTS_DB_TABLE_NITEMS_COUNTER_ID], BIF_P);
+ } else { /* BIF_ARG_2 == am_memory */
+ Sint r = res.result[ERTS_DB_TABLE_MEM_COUNTER_ID];
+ r = (Sint) ((r + sizeof(Sint) - 1) / sizeof(Sint));
+ ret = erts_make_integer(r, BIF_P);
+ }
+ } else {
+ ret = table_info(BIF_P, tb, BIF_ARG_2);
+ }
db_unlock(tb, LCK_READ);
if (is_non_value(ret)) {
BIF_ERROR(BIF_P, BADARG);
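
ets_info/1,2 now obtain size and memory through erts_flxctr, which (for decentralized tables) keeps one counter cell per scheduler group; cheap reads sum the cells approximately, while an exact figure requires a coordinated snapshot that may have to trap. A toy single-threaded sketch of the decentralized layout (hypothetical names; the real code must synchronize with concurrent writers, hence the trap states above):

    #include <stdio.h>

    #define GROUPS 4

    /* One counter cell per scheduler group; each writer updates "its"
     * cell, so an exact total means summing every cell (a snapshot). */
    typedef struct { long cell[GROUPS]; } FlxCtr;

    static void ctr_add(FlxCtr *c, int group, long v)
    {
        c->cell[group] += v;
    }

    static long ctr_read_exact(const FlxCtr *c)
    {
        long sum = 0;
        for (int g = 0; g < GROUPS; g++)
            sum += c->cell[g];
        return sum;
    }

    int main(void)
    {
        FlxCtr nitems = { {0} };
        ctr_add(&nitems, 0, 10); /* scheduler group 0 inserted 10 */
        ctr_add(&nitems, 3, -2); /* group 3 deleted 2 */
        printf("size = %ld\n", ctr_read_exact(&nitems)); /* 8 */
        return 0;
    }
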
@@ -3506,6 +3602,7 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_initialize_hash();
db_initialize_tree();
+ db_initialize_catree();
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
@@ -3696,7 +3793,7 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
- * yield. c_p->u.terminate can hold a pointer to a state while
+ * yield. *yield_state can hold a pointer to a state while
* yielding.
*/
#define ERTS_DB_INTERNAL_ERROR(LSTR) \
@@ -3704,7 +3801,7 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
__FILE__, __LINE__)
int
-erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
+erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks, void **yield_state)
{
typedef struct {
enum {
@@ -3714,7 +3811,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
}op;
DbTable *tb;
} CleanupState;
- CleanupState *state = (CleanupState *) c_p->u.terminate;
+ CleanupState *state = (CleanupState *) *yield_state;
Eterm pid = c_p->common.id;
CleanupState default_state;
SWord initial_reds = ERTS_BIF_REDS_LEFT(c_p);
@@ -3787,7 +3884,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
if (state != &default_state)
erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.terminate = NULL;
+ *yield_state = NULL;
BUMP_REDS(c_p, (initial_reds - reds));
return 0;
@@ -3807,12 +3904,12 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
yield:
if (state == &default_state) {
- c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
- sizeof(CleanupState));
- sys_memcpy(c_p->u.terminate, (void*) state, sizeof(CleanupState));
+ *yield_state = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
+ sizeof(CleanupState));
+ sys_memcpy(*yield_state, (void*) state, sizeof(CleanupState));
}
else
- ASSERT(state == c_p->u.terminate);
+ ASSERT(state == *yield_state);
return !0;
}
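
The signature change threads the yield state through a void ** out-parameter instead of hard-wiring it to c_p->u.terminate, so the caller chooses where the state lives. A compact sketch of that calling convention (simplified; the real code allocates a CleanupState):

    #include <stdio.h>
    #include <stdlib.h>

    /* Returns 0 when done, nonzero to yield; *yield_state carries the
     * progress between calls and is NULL once the work is finished. */
    static int cleanup_step(long *slices_out, void **yield_state)
    {
        long *progress = *yield_state;
        if (progress == NULL) { /* first call: allocate the state */
            progress = malloc(sizeof *progress);
            if (progress == NULL)
                abort();
            *progress = 0;
            *yield_state = progress;
        }
        if (++*progress < 3) /* pretend the job takes 3 slices */
            return 1;        /* yield; caller re-invokes later */
        *slices_out = *progress;
        free(progress);
        *yield_state = NULL; /* mark completion, as the code above does */
        return 0;
    }

    int main(void)
    {
        void *state = NULL;
        long slices = 0;
        while (cleanup_step(&slices, &state))
            puts("yield");
        printf("done after %ld slices\n", slices);
        return 0;
    }
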
@@ -3907,7 +4004,7 @@ struct free_fixations_ctx
SWord cnt;
};
-static void free_fixations_op(DbFixation* fix, void* vctx)
+static int free_fixations_op(DbFixation* fix, void* vctx, Sint reds)
{
struct free_fixations_ctx* ctx = (struct free_fixations_ctx*) vctx;
erts_aint_t diff;
@@ -3944,6 +4041,7 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
}
ctx->cnt++;
+ return 1;
}
int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
@@ -4088,7 +4186,7 @@ struct fixing_procs_info_ctx
Eterm list;
};
-static void fixing_procs_info_op(DbFixation* fix, void* vctx)
+static int fixing_procs_info_op(DbFixation* fix, void* vctx, Sint reds)
{
struct fixing_procs_info_ctx* ctx = (struct fixing_procs_info_ctx*) vctx;
Eterm* hp;
@@ -4098,6 +4196,7 @@ static void fixing_procs_info_op(DbFixation* fix, void* vctx)
tpl = TUPLE2(hp, fix->procs.p->common.id, make_small(fix->counter));
hp += 3;
ctx->list = CONS(hp, tpl, ctx->list);
+ return 1;
}
static Eterm table_info(Process* p, DbTable* tb, Eterm What)
@@ -4106,7 +4205,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
int use_monotonic;
if (What == am_size) {
- ret = make_small(erts_atomic_read_nob(&tb->common.nitems));
+ Uint size = (Uint) (DB_GET_APPROX_NITEMS(tb));
+ ret = erts_make_integer(size, p);
} else if (What == am_type) {
if (tb->common.status & DB_SET) {
ret = am_set;
@@ -4114,12 +4214,14 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_duplicate_bag;
} else if (tb->common.status & DB_ORDERED_SET) {
ret = am_ordered_set;
+ } else if (tb->common.status & DB_CA_ORDERED_SET) {
+ ret = am_ordered_set;
} else { /*TT*/
ASSERT(tb->common.status & DB_BAG);
ret = am_bag;
}
} else if (What == am_memory) {
- Uint words = (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ Uint words = (Uint) ((DB_GET_APPROX_MEM_CONSUMED(tb)
+ sizeof(Uint)
- 1)
/ sizeof(Uint));
@@ -4240,9 +4342,20 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
make_small(stats.max_chain_len),
make_small(stats.kept_items));
}
- else {
+ else if (IS_CATREE_TABLE(tb->common.status)) {
+ DbCATreeStats stats;
+ Eterm* hp;
+
+ db_calc_stats_catree(&tb->catree, &stats);
+ hp = HAlloc(p, 4);
+ ret = TUPLE3(hp,
+ make_small(stats.route_nodes),
+ make_small(stats.base_nodes),
+ make_small(stats.max_depth));
+
+ }
+ else
ret = am_false;
- }
}
return ret;
}
@@ -4266,9 +4379,9 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
tb->common.meth->db_print(to, to_arg, show, tb);
- erts_print(to, to_arg, "Objects: %d\n", (int)erts_atomic_read_nob(&tb->common.nitems));
+ erts_print(to, to_arg, "Objects: %d\n", (int)DB_GET_APPROX_NITEMS(tb));
erts_print(to, to_arg, "Words: %bpu\n",
- (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ (Uint) ((DB_GET_APPROX_MEM_CONSUMED(tb)
+ sizeof(Uint)
- 1)
/ sizeof(Uint)));
@@ -4409,6 +4522,12 @@ void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
if(IS_HASH_TABLE(tb->common.status)) {
erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ } else if(IS_CATREE_TABLE(tb->common.status)) {
+ /* erts_lcnt_enable_db_catree_lock_count is not thread safe, so
+ the table needs to be locked */
+ db_lock(tb, LCK_WRITE);
+ erts_lcnt_enable_db_catree_lock_count(&tb->catree, enable);
+ db_unlock(tb, LCK_WRITE);
}
}
@@ -4441,3 +4560,29 @@ void erts_lcnt_update_db_locks(int enable) {
#ifdef ETS_DBG_FORCE_TRAP
erts_aint_t erts_ets_dbg_force_trap = 0;
#endif
+
+int erts_ets_force_split(Eterm tid, int on)
+{
+ DbTable* tb = tid2tab(tid);
+ if (!tb || !IS_CATREE_TABLE(tb->common.type))
+ return 0;
+
+ db_lock(tb, LCK_WRITE);
+ if (!(tb->common.status & DB_DELETE))
+ db_catree_force_split(&tb->catree, on);
+ db_unlock(tb, LCK_WRITE);
+ return 1;
+}
+
+int erts_ets_debug_random_split_join(Eterm tid, int on)
+{
+ DbTable* tb = tid2tab(tid);
+ if (!tb || !IS_CATREE_TABLE(tb->common.type))
+ return 0;
+
+ db_lock(tb, LCK_WRITE);
+ if (!(tb->common.status & DB_DELETE))
+ db_catree_debug_random_split_join(&tb->catree, on);
+ db_unlock(tb, LCK_WRITE);
+ return 1;
+}
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 23975d208f..b3dc1b9ba3 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -66,6 +66,7 @@ typedef struct {
#include "erl_db_util.h" /* Flags */
#include "erl_db_hash.h" /* DbTableHash */
#include "erl_db_tree.h" /* DbTableTree */
+#include "erl_db_catree.h" /* DbTableCATree */
/*TT*/
Uint erts_get_ets_misc_mem_size(void);
@@ -90,6 +91,7 @@ union db_table {
DbTableCommon common; /* Any type of db table */
DbTableHash hash; /* Linear hash array specific data */
DbTableTree tree; /* AVL tree specific data */
+ DbTableCATree catree; /* CA tree specific data */
DbTableRelease release;
/*TT*/
};
@@ -109,7 +111,7 @@ typedef enum {
} ErtsDbSpinCount;
void init_db(ErtsDbSpinCount);
-int erts_db_process_exiting(Process *, ErtsProcLocks);
+int erts_db_process_exiting(Process *, ErtsProcLocks, void **);
int erts_db_execute_free_fixation(Process*, DbFixation*);
void db_info(fmtfn_t, void *, int);
void erts_db_foreach_table(void (*)(DbTable *, void *), void *);
@@ -128,6 +130,8 @@ extern Export ets_select_continue_exp;
extern erts_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
+int erts_ets_force_split(Eterm tid, int on);
+int erts_ets_debug_random_split_join(Eterm tid, int on);
Uint erts_db_get_max_tabs(void);
Eterm erts_db_make_tid(Process *c_p, DbTableCommon *tb);
@@ -157,7 +161,9 @@ do { \
erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \
- ((erts_aint_t) (FREE_SZ))); \
ASSERT((TAB)); \
- erts_atomic_add_nob(&(TAB)->common.memory_size, sz__); \
+ erts_flxctr_add(&(TAB)->common.counters, \
+ ERTS_DB_TABLE_MEM_COUNTER_ID, \
+ sz__); \
} while (0)
#define ERTS_ETS_MISC_MEM_ADD(SZ) \
@@ -284,6 +290,12 @@ ERTS_GLB_INLINE void erts_db_free(ErtsAlcType_t type,
void *ptr,
Uint size);
+ERTS_GLB_INLINE void erts_schedule_db_free(DbTableCommon* tab,
+ void (*free_func)(void *),
+ void *ptr,
+ ErtsThrPrgrLaterOp *lop,
+ Uint size);
+
ERTS_GLB_INLINE void erts_db_free_nt(ErtsAlcType_t type,
void *ptr,
Uint size);
@@ -296,11 +308,31 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size)
ASSERT(ptr != 0);
ASSERT(size == ERTS_ALC_DBG_BLK_SZ(ptr));
ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0);
+ ASSERT(((void *) tab) != ptr ||
+ tab->common.counters.is_decentralized ||
+ 0 == erts_flxctr_read_centralized(&tab->common.counters,
+ ERTS_DB_TABLE_MEM_COUNTER_ID));
+ erts_free(type, ptr);
+}
- ASSERT(((void *) tab) != ptr
- || erts_atomic_read_nob(&tab->common.memory_size) == 0);
+ERTS_GLB_INLINE void
+erts_schedule_db_free(DbTableCommon* tab,
+ void (*free_func)(void *),
+ void *ptr,
+ ErtsThrPrgrLaterOp *lop,
+ Uint size)
+{
+ ASSERT(ptr != 0);
+ ASSERT(((void *) tab) != ptr);
+ ASSERT(size == ERTS_ALC_DBG_BLK_SZ(ptr));
- erts_free(type, ptr);
+ /*
+ * We update table memory stats here as table may already be gone
+ * when 'free_func' is finally called.
+ */
+ ERTS_DB_ALC_MEM_UPDATE_((DbTable*)tab, size, 0);
+
+ erts_schedule_thr_prgr_later_cleanup_op(free_func, ptr, lop, size);
}
ERTS_GLB_INLINE void
diff --git a/erts/emulator/beam/erl_db_catree.c b/erts/emulator/beam/erl_db_catree.c
new file mode 100644
index 0000000000..e0d5e44f58
--- /dev/null
+++ b/erts/emulator/beam/erl_db_catree.c
@@ -0,0 +1,2332 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB and Kjell Winblad 1998-2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Implementation of ETS ordered_set table type with
+ * fine-grained synchronization.
+ *
+ * Author: Kjell Winblad
+ *
+ * This implementation is based on the contention adapting search tree
+ * (CA tree). The CA tree is a concurrent data structure that
+ * dynamically adapts its synchronization granularity based on how
+ * much contention is detected in locks. The following publication
+ * contains a detailed description of CA trees:
+ *
+ * A Contention Adapting Approach to Concurrent Ordered Sets
+ * Journal of Parallel and Distributed Computing, 2018
+ * Kjell Winblad and Konstantinos Sagonas
+ * https://doi.org/10.1016/j.jpdc.2017.11.007
+ *
+ * The following publication may also be interesting as it discusses
+ * how the CA tree can be used as an ETS ordered_set table type
+ * backend:
+ *
+ * More Scalable Ordered Set for ETS Using Adaptation
+ * In Thirteenth ACM SIGPLAN workshop on Erlang (2014)
+ * Kjell Winblad and Konstantinos Sagonas
+ * https://doi.org/10.1145/2633448.2633455
+ *
+ * This implementation of the ordered_set ETS table type is only
+ * activated when the options {write_concurrency, true}, public and
+ * ordered_set are passed to the ets:new/2 function. This
+ * implementation is expected to scale better than the default
+ * implementation located in "erl_db_tree.c".
+ *
+ * The default implementation has a static stack optimization (see
+ * get_static_stack in erl_db_tree.c). This implementation does not
+ * have such an optimization as it induces bad scalability when
+ * concurrent read operations are frequent (they all try to get hold
+ * of the same stack). The default implementation may thus perform
+ * better compared to this implementation in scenarios where the
+ * static stack optimization is useful. One such scenario is when only
+ * one process is accessing the table and this process is traversing
+ * the table with a sequence of next/2 calls.
+ */
+
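+/*
+ * Usage sketch (illustrative only, not part of this change): the option
+ * combination described above that selects this backend is
+ *
+ *     Tab = ets:new(tab, [ordered_set, public, {write_concurrency, true}]).
+ *
+ * Other option combinations for ordered_set keep using the default
+ * implementation in erl_db_tree.c.
+ */
+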
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#define ERTS_WANT_DB_INTERNAL__
+#include "erl_db.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+
+#include "erl_db_catree.h"
+#include "erl_db_tree.h"
+#include "erl_db_tree_util.h"
+
+#ifdef DEBUG
+# define IF_DEBUG(X) X
+#else
+# define IF_DEBUG(X)
+#endif
+
+/*
+** Forward declarations
+*/
+
+static SWord do_free_base_node_cont(DbTableCATree *tb, SWord num_left);
+static SWord do_free_routing_nodes_catree_cont(DbTableCATree *tb, SWord num_left);
+static DbTableCATreeNode *catree_first_base_node_from_free_list(DbTableCATree *tb);
+
+/* Method interface functions */
+static int db_first_catree(Process *p, DbTable *tbl,
+ Eterm *ret);
+static int db_next_catree(Process *p, DbTable *tbl,
+ Eterm key, Eterm *ret);
+static int db_last_catree(Process *p, DbTable *tbl,
+ Eterm *ret);
+static int db_prev_catree(Process *p, DbTable *tbl,
+ Eterm key,
+ Eterm *ret);
+static int db_put_catree(DbTable *tbl, Eterm obj, int key_clash_fail);
+static int db_get_catree(Process *p, DbTable *tbl,
+ Eterm key, Eterm *ret);
+static int db_member_catree(DbTable *tbl, Eterm key, Eterm *ret);
+static int db_get_element_catree(Process *p, DbTable *tbl,
+ Eterm key,int ndex,
+ Eterm *ret);
+static int db_erase_catree(DbTable *tbl, Eterm key, Eterm *ret);
+static int db_erase_object_catree(DbTable *tbl, Eterm object,Eterm *ret);
+static int db_slot_catree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret);
+static int db_select_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, int reversed, Eterm *ret,
+ enum DbIterSafety);
+static int db_select_count_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret, enum DbIterSafety);
+static int db_select_chunk_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
+ int reversed, Eterm *ret, enum DbIterSafety);
+static int db_select_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
+static int db_select_count_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
+static int db_select_delete_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety);
+static int db_select_delete_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
+static int db_select_replace_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety);
+static int db_select_replace_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
+static int db_take_catree(Process *, DbTable *, Eterm, Eterm *);
+static void db_print_catree(fmtfn_t to, void *to_arg,
+ int show, DbTable *tbl);
+static int db_free_table_catree(DbTable *tbl);
+static SWord db_free_table_continue_catree(DbTable *tbl, SWord);
+static void db_foreach_offheap_catree(DbTable *,
+ void (*)(ErlOffHeap *, void *),
+ void *);
+static SWord db_delete_all_objects_catree(Process* p,
+ DbTable* tbl,
+ SWord reds,
+ Eterm* nitems_holder_wb);
+static Eterm db_delete_all_objects_get_nitems_from_holder_catree(Process* p,
+ Eterm nitems_holder);
+static int
+db_lookup_dbterm_catree(Process *, DbTable *, Eterm key, Eterm obj,
+ DbUpdateHandle*);
+static void db_finalize_dbterm_catree(int cret, DbUpdateHandle *);
+
+static void split_catree(DbTableCATree *tb,
+ DbTableCATreeNode* ERTS_RESTRICT base,
+ DbTableCATreeNode* ERTS_RESTRICT parent);
+static void join_catree(DbTableCATree *tb,
+ DbTableCATreeNode *thiz,
+ DbTableCATreeNode *parent);
+
+
+/*
+** External interface
+*/
+DbTableMethod db_catree =
+{
+ db_create_catree,
+ db_first_catree,
+ db_next_catree,
+ db_last_catree,
+ db_prev_catree,
+ db_put_catree,
+ db_get_catree,
+ db_get_element_catree,
+ db_member_catree,
+ db_erase_catree,
+ db_erase_object_catree,
+ db_slot_catree,
+ db_select_chunk_catree,
+ db_select_catree,
+ db_select_delete_catree,
+ db_select_continue_catree,
+ db_select_delete_continue_catree,
+ db_select_count_catree,
+ db_select_count_continue_catree,
+ db_select_replace_catree,
+ db_select_replace_continue_catree,
+ db_take_catree,
+ db_delete_all_objects_catree,
+ db_delete_all_objects_get_nitems_from_holder_catree,
+ db_free_table_catree,
+ db_free_table_continue_catree,
+ db_print_catree,
+ db_foreach_offheap_catree,
+ db_lookup_dbterm_catree,
+ db_finalize_dbterm_catree
+
+};
+
+/*
+ * Constants
+ */
+
+#define ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION 200
+#define ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION (-1)
+#define ERL_DB_CATREE_LOCK_MORE_THAN_ONE_CONTRIBUTION (-10)
+#define ERL_DB_CATREE_HIGH_CONTENTION_LIMIT 1000
+#define ERL_DB_CATREE_LOW_CONTENTION_LIMIT (-1000)
+#define ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT 14
+
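+/*
+ * A reading of the numbers above (illustrative arithmetic only):
+ * starting from 0, six consecutive contended write-lock acquisitions
+ * push a base node's lock_statistics to 6 * 200 = 1200 > 1000, which
+ * makes wunlock_adapt_base_node attempt a split (subject to its other
+ * conditions), while about a thousand uncontended acquisitions at -1
+ * each are needed to fall below -1000 and trigger a join attempt.
+ */
+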
+/*
+ * Internal CA tree related helper functions and macros
+ */
+
+#define GET_ROUTE_NODE_KEY(node) (node->u.route.key.term)
+#define GET_BASE_NODE_LOCK(node) (&(node->u.base.lock))
+#define GET_ROUTE_NODE_LOCK(node) (&(node->u.route.lock))
+
+
+/* Helpers for reading and writing shared atomic variables */
+
+/* No memory barrier */
+#define GET_ROOT(tb) ((DbTableCATreeNode*)erts_atomic_read_nob(&((tb)->root)))
+#define GET_LEFT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->u.route.left)))
+#define GET_RIGHT(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_nob(&(ca_tree_route_node->u.route.right)))
+#define SET_ROOT(tb, v) erts_atomic_set_nob(&((tb)->root), (erts_aint_t)(v))
+#define SET_LEFT(ca_tree_route_node, v) erts_atomic_set_nob(&(ca_tree_route_node->u.route.left), (erts_aint_t)(v));
+#define SET_RIGHT(ca_tree_route_node, v) erts_atomic_set_nob(&(ca_tree_route_node->u.route.right), (erts_aint_t)(v));
+
+
+/* Release or acquire barriers */
+#define GET_ROOT_ACQB(tb) ((DbTableCATreeNode*)erts_atomic_read_acqb(&((tb)->root)))
+#define GET_LEFT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->u.route.left)))
+#define GET_RIGHT_ACQB(ca_tree_route_node) ((DbTableCATreeNode*)erts_atomic_read_acqb(&(ca_tree_route_node->u.route.right)))
+#define SET_ROOT_RELB(tb, v) erts_atomic_set_relb(&((tb)->root), (erts_aint_t)(v))
+#define SET_LEFT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->u.route.left), (erts_aint_t)(v));
+#define SET_RIGHT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->u.route.right), (erts_aint_t)(v));
+
+/* Compares a key to the key in a route node */
+static ERTS_INLINE Sint cmp_key_route(Eterm key,
+ DbTableCATreeNode *obj)
+{
+ return CMP(key, GET_ROUTE_NODE_KEY(obj));
+}
+
+/*
+ * Used by the split_catree function
+ */
+static ERTS_INLINE
+int less_than_two_elements(TreeDbTerm *root)
+{
+ return root == NULL || (root->left == NULL && root->right == NULL);
+}
+
+/*
+ * Inserts a TreeDbTerm into a tree. Returns the new root.
+ */
+static ERTS_INLINE
+TreeDbTerm* insert_TreeDbTerm(DbTableCATree *tb,
+ TreeDbTerm *insert_to_root,
+ TreeDbTerm *value_to_insert) {
+ /* Non-recursive insertion in an AVL tree, building our own stack */
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 0;
+ TreeDbTerm * base = insert_to_root;
+ TreeDbTerm **this = &base;
+ Sint c;
+ Eterm key;
+ int dir;
+ TreeDbTerm *p1, *p2, *p;
+
+ key = GETKEY(tb, value_to_insert->dbterm.tpl);
+
+ dstack[dpos++] = DIR_END;
+ for (;;)
+ if (!*this) { /* Found our place */
+ state = 1;
+ *this = value_to_insert;
+ (*this)->balance = 0;
+ (*this)->left = (*this)->right = NULL;
+ break;
+ } else if ((c = cmp_key(&tb->common, key, *this)) < 0) {
+ /* go left */
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ }
+
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ p = *this;
+ if (dir == DIR_LEFT) {
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ break;
+ case -1: /* The icky case */
+ p1 = p->left;
+ if (p1->balance == -1) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double LR rotation */
+ p2 = p1->right;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (p2->balance == -1) ? +1 : 0;
+ p1->balance = (p2->balance == 1) ? -1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ } else { /* dir == DIR_RIGHT */
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ break;
+ case 1:
+ p1 = p->right;
+ if (p1->balance == 1) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (p2->balance == 1) ? -1 : 0;
+ p1->balance = (p2->balance == -1) ? 1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ }
+ }
+ return base;
+}
+
+/*
+ * Splits an AVL tree into two trees. The node containing the "split
+ * key" is stored in the write-back parameter split_key_node_wb. The
+ * tree containing the keys that are smaller than the "split key" is
+ * stored in the write-back parameter left_wb, and the tree containing
+ * the rest of the keys in the write-back parameter right_wb.
+ */
+static void split_tree(DbTableCATree *tb,
+ TreeDbTerm *root,
+ TreeDbTerm **split_key_node_wb,
+ TreeDbTerm **left_wb,
+ TreeDbTerm **right_wb) {
+ TreeDbTerm * split_node = NULL;
+ TreeDbTerm * left_root;
+ TreeDbTerm * right_root;
+ if (root->left == NULL) { /* To get a non-empty split */
+ *right_wb = root->right;
+ *split_key_node_wb = root->right;
+ root->right = NULL;
+ root->balance = 0;
+ *left_wb = root;
+ return;
+ }
+ split_node = root;
+ left_root = split_node->left;
+ split_node->left = NULL;
+ right_root = split_node->right;
+ split_node->right = NULL;
+ right_root = insert_TreeDbTerm(tb, right_root, split_node);
+ *split_key_node_wb = split_node;
+ *left_wb = left_root;
+ *right_wb = right_root;
+}
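+
+/*
+ * Worked example (for exposition only): splitting a three-element tree
+ * rooted at 2 with children 1 and 3 takes the general path above:
+ * split_node = 2, the left tree becomes {1} and 2 is re-inserted into
+ * {3}, giving the right tree {2,3}. Thus *left_wb holds exactly the
+ * keys smaller than the split key, which is what split_catree relies
+ * on when it uses the split key as the new route node's key.
+ */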
+
+/*
+ * Used by the join_trees function
+ */
+static ERTS_INLINE int compute_tree_height(TreeDbTerm * root)
+{
+ if(root == NULL) {
+ return 0;
+ } else {
+ TreeDbTerm * current_node = root;
+ int height_so_far = 1;
+ while (current_node->left != NULL || current_node->right != NULL) {
+ if (current_node->balance == -1) {
+ current_node = current_node->left;
+ } else {
+ current_node = current_node->right;
+ }
+ height_so_far = height_so_far + 1;
+ }
+ return height_so_far;
+ }
+}
+
+/*
+ * Used by the join_trees function
+ */
+static ERTS_INLINE
+TreeDbTerm* linkout_min_or_max_tree_node(TreeDbTerm **root, int is_min)
+{
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 0;
+ TreeDbTerm **this = root;
+ int dir;
+ TreeDbTerm *q = NULL;
+
+ dstack[dpos++] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Failure */
+ return NULL;
+ } else if (is_min && (*this)->left != NULL) {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (!is_min && (*this)->right != NULL) {
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Min value, found the one to splice out */
+ q = (*this);
+ if (q->right == NULL) {
+ (*this) = q->left;
+ state = 1;
+ } else if (q->left == NULL) {
+ (*this) = q->right;
+ state = 1;
+ }
+ break;
+ }
+ }
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ if (dir == DIR_LEFT) {
+ state = tree_balance_left(this);
+ } else {
+ state = tree_balance_right(this);
+ }
+ }
+ return q;
+}
+
+#define LINKOUT_MIN_TREE_NODE(root) linkout_min_or_max_tree_node(root, 1)
+#define LINKOUT_MAX_TREE_NODE(root) linkout_min_or_max_tree_node(root, 0)
+
+/*
+ * Joins two AVL trees where all the keys in the left one are smaller
+ * than the keys in the right one and returns the resulting tree.
+ *
+ * The algorithm is described on page 474 in D. E. Knuth. The Art of
+ * Computer Programming: Sorting and Searching,
+ * vol. 3. Addison-Wesley, 2nd edition, 1998.
+ */
+static TreeDbTerm* join_trees(TreeDbTerm *left_root_param,
+ TreeDbTerm *right_root_param)
+{
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 1;
+ TreeDbTerm **this;
+ int dir;
+ TreeDbTerm *p1, *p2, *p;
+ TreeDbTerm *left_root = left_root_param;
+ TreeDbTerm *right_root = right_root_param;
+ int left_height;
+ int right_height;
+ int current_height;
+ dstack[dpos++] = DIR_END;
+ if (left_root == NULL) {
+ return right_root;
+ } else if (right_root == NULL) {
+ return left_root;
+ }
+
+ left_height = compute_tree_height(left_root);
+ right_height = compute_tree_height(right_root);
+ if (left_height >= right_height) {
+ TreeDbTerm * new_root =
+ LINKOUT_MIN_TREE_NODE(&right_root);
+ int new_right_height = compute_tree_height(right_root);
+ TreeDbTerm * current_node = left_root;
+ this = &left_root;
+ current_height = left_height;
+ while(current_height > new_right_height + 1) {
+ if (current_node->balance == -1) {
+ current_height = current_height - 2;
+ } else {
+ current_height = current_height - 1;
+ }
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ current_node = current_node->right;
+ }
+ new_root->left = current_node;
+ new_root->right = right_root;
+ new_root->balance = new_right_height - current_height;
+ *this = new_root;
+ } else {
+ /* This case is symmetric to the previous case */
+ TreeDbTerm * new_root =
+ LINKOUT_MAX_TREE_NODE(&left_root);
+ int new_left_height = compute_tree_height(left_root);
+ TreeDbTerm * current_node = right_root;
+ this = &right_root;
+ current_height = right_height;
+ while (current_height > new_left_height + 1) {
+ if (current_node->balance == 1) {
+ current_height = current_height - 2;
+ } else {
+ current_height = current_height - 1;
+ }
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ current_node = current_node->left;
+ }
+ new_root->right = current_node;
+ new_root->left = left_root;
+ new_root->balance = current_height - new_left_height;
+ *this = new_root;
+ }
+ /* Now we need to continue as if this was during the insert */
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ p = *this;
+ if (dir == DIR_LEFT) {
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ break;
+ case -1: /* The icky case */
+ p1 = p->left;
+ if (p1->balance == -1) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double LR rotation */
+ p2 = p1->right;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (p2->balance == -1) ? +1 : 0;
+ p1->balance = (p2->balance == 1) ? -1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ } else { /* dir == DIR_RIGHT */
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ break;
+ case 1:
+ p1 = p->right;
+ if (p1->balance == 1) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (p2->balance == 1) ? -1 : 0;
+ p1->balance = (p2->balance == -1) ? 1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ }
+ }
+ /* Return the joined tree */
+ if (left_height >= right_height) {
+ return left_root;
+ } else {
+ return right_root;
+ }
+}
+
+#ifdef DEBUG
+# define PROVOKE_RANDOM_SPLIT_JOIN
+#endif
+#ifdef PROVOKE_RANDOM_SPLIT_JOIN
+static int dbg_fastrand(void)
+{
+ static int g_seed = 648835;
+ g_seed = (214013*g_seed+2531011);
+ return (g_seed>>16)&0x7FFF;
+}
+
+static void dbg_provoke_random_splitjoin(DbTableCATree* tb,
+ DbTableCATreeNode* base_node)
+{
+ if (tb->common.status & DB_CATREE_FORCE_SPLIT ||
+ !(tb->common.status & DB_CATREE_DEBUG_RANDOM_SPLIT_JOIN))
+ return;
+
+ switch (dbg_fastrand() % 8) {
+ case 1:
+ base_node->u.base.lock_statistics = 1+ERL_DB_CATREE_HIGH_CONTENTION_LIMIT;
+ break;
+ case 2:
+ base_node->u.base.lock_statistics = -1+ERL_DB_CATREE_LOW_CONTENTION_LIMIT;
+ break;
+ }
+}
+#else
+# define dbg_provoke_random_splitjoin(T,N)
+#endif /* PROVOKE_RANDOM_SPLIT_JOIN */
+
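+/*
+ * Tries to write-lock a base node without blocking. Returns nonzero
+ * iff the lock was busy, i.e. the acquisition failed and should be
+ * counted as contended by the caller.
+ */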
+static ERTS_INLINE
+int try_wlock_base_node(DbTableCATreeBaseNode *base_node)
+{
+ return EBUSY == erts_rwmtx_tryrwlock(&base_node->lock);
+}
+
+/*
+ * Locks a base node without adjusting the lock statistics
+ */
+static ERTS_INLINE
+void wlock_base_node_no_stats(DbTableCATreeNode *base_node)
+{
+ ASSERT(base_node->is_base_node);
+ erts_rwmtx_rwlock(&base_node->u.base.lock);
+}
+
+/*
+ * Locks a base node and adjusts the lock statistics according to
+ * whether the lock was contended or not.
+ */
+static ERTS_INLINE
+void wlock_base_node(DbTableCATreeNode *base_node)
+{
+ ASSERT(base_node->is_base_node);
+ if (try_wlock_base_node(&base_node->u.base)) {
+ /* The lock is contended */
+ wlock_base_node_no_stats(base_node);
+ base_node->u.base.lock_statistics += ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION;
+ } else {
+ base_node->u.base.lock_statistics += ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION;
+ }
+}
+
+static ERTS_INLINE
+void wunlock_base_node(DbTableCATreeNode *base_node)
+{
+ erts_rwmtx_rwunlock(&base_node->u.base.lock);
+}
+
+static ERTS_INLINE
+void wunlock_adapt_base_node(DbTableCATree* tb,
+ DbTableCATreeNode* node,
+ DbTableCATreeNode* parent,
+ int current_level)
+{
+ dbg_provoke_random_splitjoin(tb,node);
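+ /* Decide on adaptation before the write lock is released: join when
+ * the base node has become empty or contention has stayed low, split
+ * when contention is high and the route-node layer has not reached
+ * its maximum height; otherwise just unlock. */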
+ if ((!node->u.base.root && parent && !(tb->common.status
+ & DB_CATREE_FORCE_SPLIT))
+ || node->u.base.lock_statistics < ERL_DB_CATREE_LOW_CONTENTION_LIMIT) {
+ join_catree(tb, node, parent);
+ }
+ else if (node->u.base.lock_statistics > ERL_DB_CATREE_HIGH_CONTENTION_LIMIT
+ && current_level < ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT) {
+ split_catree(tb, node, parent);
+ }
+ else {
+ wunlock_base_node(node);
+ }
+}
+
+static ERTS_INLINE
+void rlock_base_node(DbTableCATreeNode *base_node)
+{
+ ASSERT(base_node->is_base_node);
+ erts_rwmtx_rlock(&base_node->u.base.lock);
+}
+
+static ERTS_INLINE
+void runlock_base_node(DbTableCATreeNode *base_node)
+{
+ ASSERT(base_node->is_base_node);
+ erts_rwmtx_runlock(&base_node->u.base.lock);
+}
+
+static ERTS_INLINE
+void lock_route_node(DbTableCATreeNode *route_node)
+{
+ ASSERT(!route_node->is_base_node);
+ erts_mtx_lock(&route_node->u.route.lock);
+}
+
+static ERTS_INLINE
+void unlock_route_node(DbTableCATreeNode *route_node)
+{
+ ASSERT(!route_node->is_base_node);
+ erts_mtx_unlock(&route_node->u.route.lock);
+}
+
+static ERTS_INLINE
+Eterm copy_route_key(DbRouteKey* dst, Eterm key, Uint key_size)
+{
+ dst->size = key_size;
+ if (key_size != 0) {
+ Eterm* hp = &dst->heap[0];
+ ErlOffHeap tmp_offheap;
+ tmp_offheap.first = NULL;
+ dst->term = copy_struct(key, key_size, &hp, &tmp_offheap);
+ dst->oh = tmp_offheap.first;
+ }
+ else {
+ ASSERT(is_immed(key));
+ dst->term = key;
+ dst->oh = NULL;
+ }
+ return dst->term;
+}
+
+static ERTS_INLINE
+void destroy_route_key(DbRouteKey* key)
+{
+ if (key->oh) {
+ ErlOffHeap oh;
+ oh.first = key->oh;
+ erts_cleanup_offheap(&oh);
+ }
+}
+
+static ERTS_INLINE
+void init_root_iterator(DbTableCATree* tb, CATreeRootIterator* iter,
+ int read_only)
+{
+ iter->tb = tb;
+ iter->read_only = read_only;
+ iter->locked_bnode = NULL;
+ iter->next_route_key = THE_NON_VALUE;
+ iter->search_key = NULL;
+}
+
+static ERTS_INLINE
+void lock_iter_base_node(CATreeRootIterator* iter,
+ DbTableCATreeNode *base_node,
+ DbTableCATreeNode *parent,
+ int current_level)
+{
+ ASSERT(!iter->locked_bnode);
+ if (iter->read_only)
+ rlock_base_node(base_node);
+ else {
+ wlock_base_node(base_node);
+ iter->bnode_parent = parent;
+ iter->bnode_level = current_level;
+ }
+ iter->locked_bnode = base_node;
+}
+
+static ERTS_INLINE
+void unlock_iter_base_node(CATreeRootIterator* iter)
+{
+ ASSERT(iter->locked_bnode);
+ if (iter->read_only)
+ runlock_base_node(iter->locked_bnode);
+ else if (iter->locked_bnode->u.base.is_valid) {
+ wunlock_adapt_base_node(iter->tb, iter->locked_bnode,
+ iter->bnode_parent, iter->bnode_level);
+ }
+ else
+ wunlock_base_node(iter->locked_bnode);
+ iter->locked_bnode = NULL;
+}
+
+static ERTS_INLINE
+void destroy_root_iterator(CATreeRootIterator* iter)
+{
+ if (iter->locked_bnode)
+ unlock_iter_base_node(iter);
+ if (iter->search_key) {
+ destroy_route_key(iter->search_key);
+ erts_free(ERTS_ALC_T_DB_TMP, iter->search_key);
+ }
+}
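+
+/*
+ * Typical use of the root iterator (a sketch mirroring db_print_catree
+ * further down in this file):
+ *
+ *     init_root_iterator(&tbl->catree, &iter, 1);
+ *     root = catree_find_first_root(&iter);
+ *     do {
+ *         ... read via *root ...
+ *         root = catree_find_next_root(&iter, NULL);
+ *     } while (root);
+ *     destroy_root_iterator(&iter);
+ */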
+
+typedef struct
+{
+ DbTableCATreeNode *parent;
+ int current_level;
+} FindBaseNode;
+
+static ERTS_INLINE
+DbTableCATreeNode* find_base_node(DbTableCATree* tb, Eterm key,
+ FindBaseNode* fbn)
+{
+ DbTableCATreeNode* ERTS_RESTRICT node = GET_ROOT_ACQB(tb);
+ if (fbn) {
+ fbn->parent = NULL;
+ fbn->current_level = 0;
+ }
+ while (!node->is_base_node) {
+ if (fbn) {
+ fbn->current_level++;
+ fbn->parent = node;
+ }
+ if (cmp_key_route(key, node) < 0) {
+ node = GET_LEFT_ACQB(node);
+ } else {
+ node = GET_RIGHT_ACQB(node);
+ }
+ }
+ return node;
+}
+
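+/*
+ * The descent in find_base_node() takes no locks, so it can race with
+ * concurrent splits and joins. The helpers below therefore lock the
+ * base node they find, recheck its is_valid flag, and retry from the
+ * root if the node was unlinked in the meantime.
+ */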
+static ERTS_INLINE
+DbTableCATreeNode* find_rlock_valid_base_node(DbTableCATree* tb, Eterm key)
+{
+ DbTableCATreeNode* base_node;
+
+ while (1) {
+ base_node = find_base_node(tb, key, NULL);
+ rlock_base_node(base_node);
+ if (base_node->u.base.is_valid)
+ break;
+ runlock_base_node(base_node);
+ }
+ return base_node;
+}
+
+static ERTS_INLINE
+DbTableCATreeNode* find_wlock_valid_base_node(DbTableCATree* tb, Eterm key,
+ FindBaseNode* fbn)
+{
+ DbTableCATreeNode* base_node;
+
+ while (1) {
+ base_node = find_base_node(tb, key, fbn);
+ wlock_base_node(base_node);
+ if (base_node->u.base.is_valid)
+ break;
+ wunlock_base_node(base_node);
+ }
+ return base_node;
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+# define LC_ORDER(ORDER) ORDER
+#else
+# define LC_ORDER(ORDER) NIL
+#endif
+
+#define sizeof_base_node() \
+ offsetof(DbTableCATreeNode, u.base.end_of_struct__)
+
+static DbTableCATreeNode *create_base_node(DbTableCATree *tb,
+ TreeDbTerm* root)
+{
+ DbTableCATreeNode *p;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ p = erts_db_alloc(ERTS_ALC_T_DB_TABLE, (DbTable *) tb,
+ sizeof_base_node());
+
+ p->is_base_node = 1;
+ p->u.base.root = root;
+ if (tb->common.type & DB_FREQ_READ)
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ if (erts_ets_rwmtx_spin_count >= 0)
+ rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
+
+ erts_rwmtx_init_opt(&p->u.base.lock, &rwmtx_opt,
+ "erl_db_catree_base_node",
+ NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_DB);
+ p->u.base.lock_statistics = ((tb->common.status & DB_CATREE_FORCE_SPLIT)
+ ? INT_MAX : 0);
+ p->u.base.is_valid = 1;
+ return p;
+}
+
+static ERTS_INLINE Uint sizeof_route_node(Uint key_size)
+{
+ return (offsetof(DbTableCATreeNode, u.route.key.heap)
+ + key_size*sizeof(Eterm));
+}
+
+static DbTableCATreeNode*
+create_route_node(DbTableCATree *tb,
+ DbTableCATreeNode *left,
+ DbTableCATreeNode *right,
+ DbTerm * keyTerm,
+ DbTableCATreeNode* lc_parent)
+{
+ Eterm key = GETKEY(tb,keyTerm->tpl);
+ int key_size = size_object(key);
+ DbTableCATreeNode* p = erts_db_alloc(ERTS_ALC_T_DB_TABLE,
+ (DbTable *) tb,
+ sizeof_route_node(key_size));
+
+ copy_route_key(&p->u.route.key, key, key_size);
+ p->is_base_node = 0;
+ p->u.route.is_valid = 1;
+ erts_atomic_init_nob(&p->u.route.left, (erts_aint_t)left);
+ erts_atomic_init_nob(&p->u.route.right, (erts_aint_t)right);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ /* Route node lock order is inverse tree depth (from leafs toward root) */
+ p->u.route.lc_order = (lc_parent == NULL ? MAX_SMALL :
+ lc_parent->u.route.lc_order - 1);
+ /*
+ * This assert may eventually fail as we don't increase 'lc_order' in join
+ * operations when route nodes move up in the tree.
+ * Tough luck if you run a lock-checking VM for such a long time on 32-bit.
+ */
+ ERTS_LC_ASSERT(p->u.route.lc_order >= 0);
+#endif
+ erts_mtx_init(&p->u.route.lock, "erl_db_catree_route_node",
+ LC_ORDER(make_small(p->u.route.lc_order)),
+ ERTS_LOCK_FLAGS_CATEGORY_DB);
+ return p;
+}
+
+static void do_free_base_node(void* vptr)
+{
+ DbTableCATreeNode *p = (DbTableCATreeNode *)vptr;
+ ASSERT(p->is_base_node);
+ erts_rwmtx_destroy(&p->u.base.lock);
+ erts_free(ERTS_ALC_T_DB_TABLE, p);
+}
+
+static void free_catree_base_node(DbTableCATree* tb, DbTableCATreeNode* p)
+{
+ ASSERT(p->is_base_node);
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof_base_node(), 0);
+ do_free_base_node(p);
+}
+
+static void do_free_route_node(void *vptr)
+{
+ DbTableCATreeNode *p = (DbTableCATreeNode *)vptr;
+ ASSERT(!p->is_base_node);
+ erts_mtx_destroy(&p->u.route.lock);
+ destroy_route_key(&p->u.route.key);
+ erts_free(ERTS_ALC_T_DB_TABLE, p);
+}
+
+static void free_catree_route_node(DbTableCATree* tb, DbTableCATreeNode* p)
+{
+ ASSERT(!p->is_base_node);
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof_route_node(p->u.route.key.size), 0);
+ do_free_route_node(p);
+}
+
+
+/*
+ * Returns the parent routing node of the specified
+ * route node 'child' if such a parent exists
+ * or NULL if 'child' is attached to the root.
+ */
+static ERTS_INLINE DbTableCATreeNode *
+parent_of(DbTableCATree *tb,
+ DbTableCATreeNode *child)
+{
+ Eterm key = GET_ROUTE_NODE_KEY(child);
+ DbTableCATreeNode *current = GET_ROOT_ACQB(tb);
+ DbTableCATreeNode *prev = NULL;
+
+ while (current != child) {
+ prev = current;
+ if (cmp_key_route(key, current) < 0) {
+ current = GET_LEFT_ACQB(current);
+ } else {
+ current = GET_RIGHT_ACQB(current);
+ }
+ }
+ return prev;
+}
+
+
+static ERTS_INLINE DbTableCATreeNode *
+leftmost_base_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode *node = root;
+ while (!node->is_base_node) {
+ node = GET_LEFT_ACQB(node);
+ }
+ return node;
+}
+
+
+static ERTS_INLINE DbTableCATreeNode *
+rightmost_base_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode *node = root;
+ while (!node->is_base_node) {
+ node = GET_RIGHT_ACQB(node);
+ }
+ return node;
+}
+
+
+static ERTS_INLINE DbTableCATreeNode *
+leftmost_route_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode *node = root;
+ DbTableCATreeNode *prev_node = NULL;
+ while (!node->is_base_node) {
+ prev_node = node;
+ node = GET_LEFT_ACQB(node);
+ }
+ return prev_node;
+}
+
+static ERTS_INLINE DbTableCATreeNode*
+rightmost_route_node(DbTableCATreeNode *root)
+{
+ DbTableCATreeNode * node = root;
+ DbTableCATreeNode * prev_node = NULL;
+ while (!node->is_base_node) {
+ prev_node = node;
+ node = GET_RIGHT_ACQB(node);
+ }
+ return prev_node;
+}
+
+static ERTS_INLINE
+void init_tree_stack(DbTreeStack *stack,
+ TreeDbTerm **stack_array,
+ Uint init_slot)
+{
+ stack->array = stack_array;
+ stack->pos = 0;
+ stack->slot = init_slot;
+}
+
+static void join_catree(DbTableCATree *tb,
+ DbTableCATreeNode *thiz,
+ DbTableCATreeNode *parent)
+{
+ DbTableCATreeNode *gparent;
+ DbTableCATreeNode *neighbor;
+ DbTableCATreeNode *new_neighbor;
+ DbTableCATreeNode *neighbor_parent;
+
+ ASSERT(thiz->is_base_node);
+ if (parent == NULL) {
+ thiz->u.base.lock_statistics = 0;
+ wunlock_base_node(thiz);
+ return;
+ }
+ ASSERT(!parent->is_base_node);
+ if (GET_LEFT(parent) == thiz) {
+ neighbor = leftmost_base_node(GET_RIGHT_ACQB(parent));
+ if (try_wlock_base_node(&neighbor->u.base)) {
+ /* Failed to acquire lock */
+ thiz->u.base.lock_statistics = 0;
+ wunlock_base_node(thiz);
+ return;
+ } else if (!neighbor->u.base.is_valid) {
+ thiz->u.base.lock_statistics = 0;
+ wunlock_base_node(thiz);
+ wunlock_base_node(neighbor);
+ return;
+ } else {
+ lock_route_node(parent);
+ parent->u.route.is_valid = 0;
+ neighbor->u.base.is_valid = 0;
+ thiz->u.base.is_valid = 0;
+ gparent = NULL;
+ do {
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ gparent = parent_of(tb, parent);
+ if (gparent != NULL)
+ lock_route_node(gparent);
+ } while (gparent != NULL && !gparent->u.route.is_valid);
+
+ if (gparent == NULL) {
+ SET_ROOT_RELB(tb, GET_RIGHT(parent));
+ } else if (GET_LEFT(gparent) == parent) {
+ SET_LEFT_RELB(gparent, GET_RIGHT(parent));
+ } else {
+ SET_RIGHT_RELB(gparent, GET_RIGHT(parent));
+ }
+ unlock_route_node(parent);
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ {
+ TreeDbTerm* new_root = join_trees(thiz->u.base.root,
+ neighbor->u.base.root);
+ new_neighbor = create_base_node(tb, new_root);
+ }
+ if (GET_RIGHT(parent) == neighbor) {
+ neighbor_parent = gparent;
+ } else {
+ neighbor_parent = leftmost_route_node(GET_RIGHT(parent));
+ }
+ }
+ } else { /* Symmetric case */
+ ASSERT(GET_RIGHT(parent) == thiz);
+ neighbor = rightmost_base_node(GET_LEFT_ACQB(parent));
+ if (try_wlock_base_node(&neighbor->u.base)) {
+ /* Failed to acquire lock */
+ thiz->u.base.lock_statistics = 0;
+ wunlock_base_node(thiz);
+ return;
+ } else if (!neighbor->u.base.is_valid) {
+ thiz->u.base.lock_statistics = 0;
+ wunlock_base_node(thiz);
+ wunlock_base_node(neighbor);
+ return;
+ } else {
+ lock_route_node(parent);
+ parent->u.route.is_valid = 0;
+ neighbor->u.base.is_valid = 0;
+ thiz->u.base.is_valid = 0;
+ gparent = NULL;
+ do {
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ gparent = parent_of(tb, parent);
+ if (gparent != NULL) {
+ lock_route_node(gparent);
+ }
+ } while (gparent != NULL && !gparent->u.route.is_valid);
+ if (gparent == NULL) {
+ SET_ROOT_RELB(tb, GET_LEFT(parent));
+ } else if (GET_RIGHT(gparent) == parent) {
+ SET_RIGHT_RELB(gparent, GET_LEFT(parent));
+ } else {
+ SET_LEFT_RELB(gparent, GET_LEFT(parent));
+ }
+ unlock_route_node(parent);
+ if (gparent != NULL) {
+ unlock_route_node(gparent);
+ }
+ {
+ TreeDbTerm* new_root = join_trees(neighbor->u.base.root,
+ thiz->u.base.root);
+ new_neighbor = create_base_node(tb, new_root);
+ }
+ if (GET_LEFT(parent) == neighbor) {
+ neighbor_parent = gparent;
+ } else {
+ neighbor_parent =
+ rightmost_route_node(GET_LEFT(parent));
+ }
+ }
+ }
+ /* Link in new neighbor and free nodes that are no longer in the tree */
+ if (neighbor_parent == NULL) {
+ SET_ROOT_RELB(tb, new_neighbor);
+ } else if (GET_LEFT(neighbor_parent) == neighbor) {
+ SET_LEFT_RELB(neighbor_parent, new_neighbor);
+ } else {
+ SET_RIGHT_RELB(neighbor_parent, new_neighbor);
+ }
+ wunlock_base_node(thiz);
+ wunlock_base_node(neighbor);
+ /* Free the parent and base */
+ erts_schedule_db_free(&tb->common,
+ do_free_route_node,
+ parent,
+ &parent->u.route.free_item,
+ sizeof_route_node(parent->u.route.key.size));
+ erts_schedule_db_free(&tb->common,
+ do_free_base_node,
+ thiz,
+ &thiz->u.base.free_item,
+ sizeof_base_node());
+ erts_schedule_db_free(&tb->common,
+ do_free_base_node,
+ neighbor,
+ &neighbor->u.base.free_item,
+ sizeof_base_node());
+}
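+
+/*
+ * Note on the protocol above: the two base nodes and their common route
+ * node are all marked invalid before anything is unlinked, so readers
+ * that raced past them will retry from the root, and the unlinked nodes
+ * are reclaimed via thread progress (erts_schedule_db_free) because
+ * other threads may still be traversing them.
+ */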
+
+static void split_catree(DbTableCATree *tb,
+ DbTableCATreeNode* ERTS_RESTRICT base,
+ DbTableCATreeNode* ERTS_RESTRICT parent)
+{
+ TreeDbTerm *splitOutWriteBack;
+ DbTableCATreeNode* ERTS_RESTRICT new_left;
+ DbTableCATreeNode* ERTS_RESTRICT new_right;
+ DbTableCATreeNode* ERTS_RESTRICT new_route;
+
+ if (less_than_two_elements(base->u.base.root)) {
+ if (!(tb->common.status & DB_CATREE_FORCE_SPLIT))
+ base->u.base.lock_statistics = 0;
+ wunlock_base_node(base);
+ return;
+ } else {
+ TreeDbTerm *left_tree;
+ TreeDbTerm *right_tree;
+
+ split_tree(tb, base->u.base.root, &splitOutWriteBack,
+ &left_tree, &right_tree);
+
+ new_left = create_base_node(tb, left_tree);
+ new_right = create_base_node(tb, right_tree);
+ new_route = create_route_node(tb,
+ new_left,
+ new_right,
+ &splitOutWriteBack->dbterm,
+ parent);
+ if (parent == NULL) {
+ SET_ROOT_RELB(tb, new_route);
+ } else if(GET_LEFT(parent) == base) {
+ SET_LEFT_RELB(parent, new_route);
+ } else {
+ SET_RIGHT_RELB(parent, new_route);
+ }
+ base->u.base.is_valid = 0;
+ wunlock_base_node(base);
+ erts_schedule_db_free(&tb->common,
+ do_free_base_node,
+ base,
+ &base->u.base.free_item,
+ sizeof_base_node());
+ }
+}
+
+/*
+ * Helper functions for removing the table
+ */
+
+static void catree_add_base_node_to_free_list(
+ DbTableCATree *tb,
+ DbTableCATreeNode *base_node_container)
+{
+ base_node_container->u.base.next =
+ tb->base_nodes_to_free_list;
+ tb->base_nodes_to_free_list = base_node_container;
+}
+
+static void catree_deque_base_node_from_free_list(DbTableCATree *tb)
+{
+ if (tb->base_nodes_to_free_list == NULL) {
+ return; /* List empty */
+ } else {
+ DbTableCATreeNode *first = tb->base_nodes_to_free_list;
+ tb->base_nodes_to_free_list = first->u.base.next;
+ }
+}
+
+static DbTableCATreeNode *catree_first_base_node_from_free_list(
+ DbTableCATree *tb)
+{
+ return tb->base_nodes_to_free_list;
+}
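+
+/*
+ * Deletion outline (see db_free_table_continue_catree below): first the
+ * routing-node layer is torn down, queueing every base node on the free
+ * list above; then each base node's element tree is freed a bounded
+ * number of terms at a time so that the work can yield and resume.
+ */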
+
+static SWord do_free_routing_nodes_catree_cont(DbTableCATree *tb, SWord num_left)
+{
+ DbTableCATreeNode *root;
+ DbTableCATreeNode *p;
+ for (;;) {
+ root = POP_NODE(&tb->free_stack_rnodes);
+ if (root == NULL) break;
+ else if(root->is_base_node) {
+ catree_add_base_node_to_free_list(tb, root);
+ break;
+ }
+ for (;;) {
+ if ((GET_LEFT(root) != NULL) &&
+ (p = GET_LEFT(root))->is_base_node) {
+ SET_LEFT(root, NULL);
+ catree_add_base_node_to_free_list(tb, p);
+ } else if ((GET_RIGHT(root) != NULL) &&
+ (p = GET_RIGHT(root))->is_base_node) {
+ SET_RIGHT(root, NULL);
+ catree_add_base_node_to_free_list(tb, p);
+ } else if ((p = GET_LEFT(root)) != NULL) {
+ SET_LEFT(root, NULL);
+ PUSH_NODE(&tb->free_stack_rnodes, root);
+ root = p;
+ } else if ((p = GET_RIGHT(root)) != NULL) {
+ SET_RIGHT(root, NULL);
+ PUSH_NODE(&tb->free_stack_rnodes, root);
+ root = p;
+ } else {
+ free_catree_route_node(tb, root);
+ if (--num_left >= 0) {
+ break;
+ } else {
+ return num_left; /* Done enough for now */
+ }
+ }
+ }
+ }
+ return num_left;
+}
+
+static SWord do_free_base_node_cont(DbTableCATree *tb, SWord num_left)
+{
+ TreeDbTerm *root;
+ TreeDbTerm *p;
+ DbTableCATreeNode *base_node_container =
+ catree_first_base_node_from_free_list(tb);
+ for (;;) {
+ root = POP_NODE(&tb->free_stack_elems);
+ if (root == NULL) break;
+ for (;;) {
+ if ((p = root->left) != NULL) {
+ root->left = NULL;
+ PUSH_NODE(&tb->free_stack_elems, root);
+ root = p;
+ } else if ((p = root->right) != NULL) {
+ root->right = NULL;
+ PUSH_NODE(&tb->free_stack_elems, root);
+ root = p;
+ } else {
+ DEC_NITEMS((DbTable*)tb);
+ tb->nr_of_deleted_items++;
+ free_term((DbTable*)tb, root);
+ if (--num_left >= 0) {
+ break;
+ } else {
+ return num_left; /* Done enough for now */
+ }
+ }
+ }
+ }
+ catree_deque_base_node_from_free_list(tb);
+ free_catree_base_node(tb, base_node_container);
+ base_node_container = catree_first_base_node_from_free_list(tb);
+ if (base_node_container != NULL) {
+ PUSH_NODE(&tb->free_stack_elems, base_node_container->u.base.root);
+ }
+ return num_left;
+}
+
+
+/*
+** Initialization function
+*/
+
+void db_initialize_catree(void)
+{
+}
+
+/*
+** Table interface routines (i.e., what's called by the BIFs)
+*/
+
+int db_create_catree(Process *p, DbTable *tbl)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode *root;
+
+ root = create_base_node(tb, NULL);
+ tb->deletion = 0;
+ tb->base_nodes_to_free_list = NULL;
+ tb->nr_of_deleted_items = 0;
+#ifdef DEBUG
+ tbl->common.status |= DB_CATREE_DEBUG_RANDOM_SPLIT_JOIN;
+#endif
+ erts_atomic_init_relb(&(tb->root), (erts_aint_t)root);
+ return DB_ERROR_NONE;
+}
+
+static int db_first_catree(Process *p, DbTable *tbl, Eterm *ret)
+{
+ TreeDbTerm *root;
+ CATreeRootIterator iter;
+ int result;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ root = *catree_find_first_root(&iter);
+ if (!root) {
+ TreeDbTerm **pp = catree_find_next_root(&iter, NULL);
+ root = pp ? *pp : NULL;
+ }
+
+ result = db_first_tree_common(p, tbl, root, ret, NULL);
+
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_next_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTreeStack stack;
+ TreeDbTerm * stack_array[STACK_NEED];
+ TreeDbTerm **rootp;
+ CATreeRootIterator iter;
+ int result;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ iter.next_route_key = key;
+ rootp = catree_find_next_root(&iter, NULL);
+
+ do {
+ init_tree_stack(&stack, stack_array, 0);
+ result = db_next_tree_common(p, tbl, (rootp ? *rootp : NULL), key, ret, &stack);
+ if (result != DB_ERROR_NONE || *ret != am_EOT)
+ break;
+
+ rootp = catree_find_next_root(&iter, NULL);
+ } while (rootp);
+
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_last_catree(Process *p, DbTable *tbl, Eterm *ret)
+{
+ TreeDbTerm *root;
+ CATreeRootIterator iter;
+ int result;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ root = *catree_find_last_root(&iter);
+ if (!root) {
+ TreeDbTerm **pp = catree_find_prev_root(&iter, NULL);
+ root = pp ? *pp : NULL;
+ }
+
+ result = db_last_tree_common(p, tbl, root, ret, NULL);
+
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_prev_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTreeStack stack;
+ TreeDbTerm * stack_array[STACK_NEED];
+ TreeDbTerm **rootp;
+ CATreeRootIterator iter;
+ int result;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ iter.next_route_key = key;
+ rootp = catree_find_prev_root(&iter, NULL);
+
+ do {
+ init_tree_stack(&stack, stack_array, 0);
+ result = db_prev_tree_common(p, tbl, (rootp ? *rootp : NULL), key, ret,
+ &stack);
+ if (result != DB_ERROR_NONE || *ret != am_EOT)
+ break;
+ rootp = catree_find_prev_root(&iter, NULL);
+ } while (rootp);
+
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_put_catree(DbTable *tbl, Eterm obj, int key_clash_fail)
+{
+ DbTableCATree *tb = &tbl->catree;
+ Eterm key = GETKEY(&tb->common, tuple_val(obj));
+ FindBaseNode fbn;
+ DbTableCATreeNode* node = find_wlock_valid_base_node(tb, key, &fbn);
+ int result = db_put_tree_common(&tb->common, &node->u.base.root, obj,
+ key_clash_fail, NULL);
+ wunlock_adapt_base_node(tb, node, fbn.parent, fbn.current_level);
+ return result;
+}
+
+static int db_get_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode* node = find_rlock_valid_base_node(tb, key);
+ int result = db_get_tree_common(p, &tb->common,
+ node->u.base.root,
+ key, ret, NULL);
+ runlock_base_node(node);
+ return result;
+}
+
+TreeDbTerm** catree_find_root(Eterm key, CATreeRootIterator* iter)
+{
+ FindBaseNode fbn;
+ DbTableCATreeNode* base_node;
+
+ while (1) {
+ base_node = find_base_node(iter->tb, key, &fbn);
+ lock_iter_base_node(iter, base_node, fbn.parent, fbn.current_level);
+ if (base_node->u.base.is_valid)
+ break;
+ unlock_iter_base_node(iter);
+ }
+ return &base_node->u.base.root;
+}
+
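+/*
+ * A search key that borrows storage from a term in the tree could be
+ * freed once its base node is unlocked, so non-immediate keys are
+ * copied into iter->search_key before the node is released (see
+ * catree_find_nextprev_root below).
+ */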
+static Eterm save_iter_search_key(CATreeRootIterator* iter, Eterm key)
+{
+ Uint key_size;
+
+ if (is_immed(key))
+ return key;
+
+ if (iter->search_key) {
+ if (key == iter->search_key->term)
+ return key; /* already saved */
+ destroy_route_key(iter->search_key);
+ }
+ key_size = size_object(key);
+ if (!iter->search_key || key_size > iter->search_key->size) {
+ iter->search_key = erts_realloc(ERTS_ALC_T_DB_TMP,
+ iter->search_key,
+ (offsetof(DbRouteKey, heap)
+ + key_size*sizeof(Eterm)));
+ }
+ return copy_route_key(iter->search_key, key, key_size);
+}
+
+TreeDbTerm** catree_find_nextprev_root(CATreeRootIterator *iter,
+ int forward,
+ Eterm *search_keyp)
+{
+#ifdef DEBUG
+ DbTableCATreeNode *rejected_invalid = NULL;
+ DbTableCATreeNode *rejected_empty = NULL;
+#endif
+ DbTableCATreeNode *node;
+ DbTableCATreeNode *parent;
+ DbTableCATreeNode* next_route_node;
+ Eterm route_key = iter->next_route_key;
+ int current_level;
+
+ if (iter->locked_bnode) {
+ if (search_keyp)
+ *search_keyp = save_iter_search_key(iter, *search_keyp);
+ unlock_iter_base_node(iter);
+ }
+
+ if (is_non_value(route_key))
+ return NULL;
+
+ while (1) {
+ node = GET_ROOT_ACQB(iter->tb);
+ current_level = 0;
+ parent = NULL;
+ next_route_node = NULL;
+ while (!node->is_base_node) {
+ current_level++;
+ parent = node;
+ if (forward) {
+ if (cmp_key_route(route_key,node) < 0) {
+ next_route_node = node;
+ node = GET_LEFT_ACQB(node);
+ } else {
+ node = GET_RIGHT_ACQB(node);
+ }
+ }
+ else {
+ if (cmp_key_route(route_key,node) > 0) {
+ next_route_node = node;
+ node = GET_RIGHT_ACQB(node);
+ } else {
+ node = GET_LEFT_ACQB(node);
+ }
+ }
+ }
+ ASSERT(node != rejected_invalid);
+ lock_iter_base_node(iter, node, parent, current_level);
+ if (node->u.base.is_valid) {
+ ASSERT(node != rejected_empty);
+ if (node->u.base.root) {
+ iter->next_route_key = (next_route_node ?
+ next_route_node->u.route.key.term :
+ THE_NON_VALUE);
+ iter->locked_bnode = node;
+ return &node->u.base.root;
+ }
+ if (!next_route_node) {
+ unlock_iter_base_node(iter);
+ return NULL;
+ }
+ route_key = next_route_node->u.route.key.term;
+ IF_DEBUG(rejected_empty = node);
+ }
+ else
+ IF_DEBUG(rejected_invalid = node);
+
+ /* Retry */
+ unlock_iter_base_node(iter);
+ }
+}
+
+TreeDbTerm** catree_find_next_root(CATreeRootIterator *iter, Eterm* keyp)
+{
+ return catree_find_nextprev_root(iter, 1, keyp);
+}
+
+TreeDbTerm** catree_find_prev_root(CATreeRootIterator *iter, Eterm* keyp)
+{
+ return catree_find_nextprev_root(iter, 0, keyp);
+}
+
+/* @brief Finds the root of the tree where the object with the smallest
+ * key larger than the partially bound key may reside. Can be used as
+ * a starting point for a reverse iteration with pb_key.
+ *
+ * @param pb_key The partially bound key. Example: {42, '$1'}
+ * @param iter An initialized root iterator.
+ *
+ * @return Pointer to the found root pointer. Never NULL.
+ */
+TreeDbTerm** catree_find_next_from_pb_key_root(Eterm pb_key,
+ CATreeRootIterator* iter)
+{
+#ifdef DEBUG
+ DbTableCATreeNode *rejected_base = NULL;
+#endif
+ DbTableCATreeNode *node;
+ DbTableCATreeNode *parent;
+ DbTableCATreeNode* next_route_node;
+ int current_level;
+
+ ASSERT(!iter->locked_bnode);
+
+ while (1) {
+ node = GET_ROOT_ACQB(iter->tb);
+ current_level = 0;
+ parent = NULL;
+ next_route_node = NULL;
+ while (!node->is_base_node) {
+ current_level++;
+ parent = node;
+ if (cmp_partly_bound(pb_key, GET_ROUTE_NODE_KEY(node)) >= 0) {
+ next_route_node = node;
+ node = GET_RIGHT_ACQB(node);
+ } else {
+ node = GET_LEFT_ACQB(node);
+ }
+ }
+ ASSERT(node != rejected_base);
+ lock_iter_base_node(iter, node, parent, current_level);
+ if (node->u.base.is_valid) {
+ iter->next_route_key = (next_route_node ?
+ next_route_node->u.route.key.term :
+ THE_NON_VALUE);
+ return &node->u.base.root;
+ }
+ /* Retry */
+ unlock_iter_base_node(iter);
+#ifdef DEBUG
+ rejected_base = node;
+#endif
+ }
+}
+
+/* @brief Finds the root of the tree where the object with the largest
+ * key smaller than the partially bound key may reside. Can be used as
+ * a starting point for a forward iteration with pb_key.
+ *
+ * @param pb_key The partially bound key. Example: {42, '$1'}
+ * @param iter An initialized root iterator.
+ *
+ * @return Pointer to the found root pointer. Never NULL.
+ */
+TreeDbTerm** catree_find_prev_from_pb_key_root(Eterm pb_key,
+ CATreeRootIterator* iter)
+{
+#ifdef DEBUG
+ DbTableCATreeNode *rejected_base = NULL;
+#endif
+ DbTableCATreeNode *node;
+ DbTableCATreeNode *parent;
+ DbTableCATreeNode* next_route_node;
+ int current_level;
+
+ ASSERT(!iter->locked_bnode);
+
+ while (1) {
+ node = GET_ROOT_ACQB(iter->tb);
+ current_level = 0;
+ parent = NULL;
+ next_route_node = NULL;
+ while (!node->is_base_node) {
+ current_level++;
+ parent = node;
+ if (cmp_partly_bound(pb_key, GET_ROUTE_NODE_KEY(node)) <= 0) {
+ next_route_node = node;
+ node = GET_LEFT_ACQB(node);
+ } else {
+ node = GET_RIGHT_ACQB(node);
+ }
+ }
+ ASSERT(node != rejected_base);
+ lock_iter_base_node(iter, node, parent, current_level);
+ if (node->u.base.is_valid) {
+ iter->next_route_key = (next_route_node ?
+ next_route_node->u.route.key.term :
+ THE_NON_VALUE);
+ return &node->u.base.root;
+ }
+ /* Retry */
+ unlock_iter_base_node(iter);
+#ifdef DEBUG
+ rejected_base = node;
+#endif
+ }
+}
+
+static TreeDbTerm** catree_find_firstlast_root(CATreeRootIterator* iter,
+ int first)
+{
+#ifdef DEBUG
+ DbTableCATreeNode *rejected_base = NULL;
+#endif
+ DbTableCATreeNode *node;
+ DbTableCATreeNode* next_route_node;
+ int current_level;
+
+ while (1) {
+ node = GET_ROOT_ACQB(iter->tb);
+ current_level = 0;
+ next_route_node = NULL;
+ while (!node->is_base_node) {
+ current_level++;
+ next_route_node = node;
+ node = first ? GET_LEFT_ACQB(node) : GET_RIGHT_ACQB(node);
+ }
+ ASSERT(node != rejected_base);
+ lock_iter_base_node(iter, node, next_route_node, current_level);
+ if (node->u.base.is_valid) {
+ iter->next_route_key = (next_route_node ?
+ next_route_node->u.route.key.term :
+ THE_NON_VALUE);
+ return &node->u.base.root;
+ }
+ /* Retry */
+ unlock_iter_base_node(iter);
+#ifdef DEBUG
+ rejected_base = node;
+#endif
+ }
+}
+
+TreeDbTerm** catree_find_first_root(CATreeRootIterator* iter)
+{
+ return catree_find_firstlast_root(iter, 1);
+}
+
+TreeDbTerm** catree_find_last_root(CATreeRootIterator* iter)
+{
+ return catree_find_firstlast_root(iter, 0);
+}
+
+static int db_member_catree(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode* node = find_rlock_valid_base_node(tb, key);
+ int result = db_member_tree_common(&tb->common,
+ node->u.base.root,
+ key, ret, NULL);
+ runlock_base_node(node);
+ return result;
+}
+
+static int db_get_element_catree(Process *p, DbTable *tbl,
+ Eterm key, int ndex, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbTableCATreeNode* node = find_rlock_valid_base_node(tb, key);
+ int result = db_get_element_tree_common(p, &tb->common,
+ node->u.base.root,
+ key, ndex, ret, NULL);
+ runlock_base_node(node);
+ return result;
+}
+
+static int db_erase_catree(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ FindBaseNode fbn;
+ DbTableCATreeNode* node = find_wlock_valid_base_node(tb, key, &fbn);
+ int result = db_erase_tree_common(tbl, &node->u.base.root, key,
+ ret, NULL);
+ wunlock_adapt_base_node(tb, node, fbn.parent, fbn.current_level);
+ return result;
+}
+
+static int db_erase_object_catree(DbTable *tbl, Eterm object, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ Eterm key = GETKEY(&tb->common, tuple_val(object));
+ FindBaseNode fbn;
+ DbTableCATreeNode* node = find_wlock_valid_base_node(tb, key, &fbn);
+ int result = db_erase_object_tree_common(tbl,
+ &node->u.base.root,
+ object,
+ ret,
+ NULL);
+ wunlock_adapt_base_node(tb, node, fbn.parent, fbn.current_level);
+ return result;
+}
+
+
+static int db_slot_catree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ result = db_slot_tree_common(p, tbl, *catree_find_first_root(&iter),
+ slot_term, ret, NULL, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_continue_catree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ enum DbIterSafety* safety_p)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ result = db_select_continue_tree_common(p, &tbl->common,
+ continuation, ret, NULL, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, int reverse, Eterm *ret,
+ enum DbIterSafety safety)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ result = db_select_tree_common(p, tbl, tid, pattern, reverse, ret,
+ NULL, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_count_continue_catree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ enum DbIterSafety* safety_p)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ result = db_select_count_continue_tree_common(p, tbl,
+ continuation, ret, NULL,
+ &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_count_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ result = db_select_count_tree_common(p, tbl,
+ tid, pattern, ret, NULL, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_chunk_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
+ int reversed, Eterm *ret,
+ enum DbIterSafety safety)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ result = db_select_chunk_tree_common(p, tbl,
+ tid, pattern, chunk_size, reversed, ret,
+ NULL, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_delete_continue_catree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ enum DbIterSafety* safety_p)
+{
+ DbTreeStack stack;
+ TreeDbTerm * stack_array[STACK_NEED];
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 0);
+ init_tree_stack(&stack, stack_array, 0);
+ result = db_select_delete_continue_tree_common(p, tbl, continuation, ret,
+ &stack, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_delete_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
+{
+ DbTreeStack stack;
+ TreeDbTerm * stack_array[STACK_NEED];
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 0);
+ init_tree_stack(&stack, stack_array, 0);
+ result = db_select_delete_tree_common(p, tbl,
+ tid, pattern, ret, &stack,
+ &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_replace_catree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety_p)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 0);
+ result = db_select_replace_tree_common(p, tbl,
+ tid, pattern, ret, NULL, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_select_replace_continue_catree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety* safety_p)
+{
+ int result;
+ CATreeRootIterator iter;
+
+ init_root_iterator(&tbl->catree, &iter, 0);
+ result = db_select_replace_continue_tree_common(p, tbl, continuation, ret,
+ NULL, &iter);
+ destroy_root_iterator(&iter);
+ return result;
+}
+
+static int db_take_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableCATree *tb = &tbl->catree;
+ FindBaseNode fbn;
+ DbTableCATreeNode* node = find_wlock_valid_base_node(tb, key, &fbn);
+ int result = db_take_tree_common(p, tbl, &node->u.base.root, key,
+ ret, NULL);
+ wunlock_adapt_base_node(tb, node, fbn.parent, fbn.current_level);
+ return result;
+}
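+
+/*
+ * db_take_catree above (and db_lookup_dbterm_catree further down) follow
+ * the same locking pattern: find_wlock_valid_base_node() write-locks the
+ * base node covering the key, the sequential tree operation runs under
+ * that lock, and wunlock_adapt_base_node() releases the lock and may also
+ * split or join the base node depending on the lock statistics gathered
+ * while acquiring it. Roughly:
+ *
+ *     FindBaseNode fbn;
+ *     DbTableCATreeNode* node = find_wlock_valid_base_node(tb, key, &fbn);
+ *     ... operate on node->u.base.root under the write lock ...
+ *     wunlock_adapt_base_node(tb, node, fbn.parent, fbn.current_level);
+ */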
+
+/*
+** Other interface routines (not directly coupled to one bif)
+*/
+
+
+/* Display tree contents (for dump) */
+static void db_print_catree(fmtfn_t to, void *to_arg,
+ int show, DbTable *tbl)
+{
+ CATreeRootIterator iter;
+ TreeDbTerm** root;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ root = catree_find_first_root(&iter);
+ do {
+ db_print_tree_common(to, to_arg, show, *root, tbl);
+ root = catree_find_next_root(&iter, NULL);
+ } while (root);
+ destroy_root_iterator(&iter);
+}
+
+/* Release all memory occupied by a single table */
+static int db_free_table_catree(DbTable *tbl)
+{
+ while (db_free_table_continue_catree(tbl, ERTS_SWORD_MAX) < 0)
+ ;
+ return 1;
+}
+
+static SWord db_free_table_continue_catree(DbTable *tbl, SWord reds)
+{
+ DbTableCATreeNode *first_base_node;
+ DbTableCATree *tb = &tbl->catree;
+ if (!tb->deletion) {
+ tb->deletion = 1;
+ tb->free_stack_elems.array =
+ erts_db_alloc(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ sizeof(TreeDbTerm *) * STACK_NEED);
+ tb->free_stack_elems.pos = 0;
+ tb->free_stack_elems.slot = 0;
+ tb->free_stack_rnodes.array =
+ erts_db_alloc(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ sizeof(DbTableCATreeNode *) * STACK_NEED);
+ tb->free_stack_rnodes.pos = 0;
+ tb->free_stack_rnodes.size = STACK_NEED;
+ PUSH_NODE(&tb->free_stack_rnodes, GET_ROOT(tb));
+ tb->is_routing_nodes_freed = 0;
+ tb->base_nodes_to_free_list = NULL;
+ tb->nr_of_deleted_items = 0;
+ }
+ if (!tb->is_routing_nodes_freed) {
+ reds = do_free_routing_nodes_catree_cont(tb, reds);
+ if (reds < 0) {
+ return reds; /* Not finished */
+ } else {
+ tb->is_routing_nodes_freed = 1; /* Done with the routing nodes */
+ first_base_node = catree_first_base_node_from_free_list(tb);
+ PUSH_NODE(&tb->free_stack_elems, first_base_node->u.base.root);
+ }
+ }
+ while (catree_first_base_node_from_free_list(tb) != NULL) {
+ reds = do_free_base_node_cont(tb, reds);
+ if (reds < 0) {
+ return reds; /* Continue later */
+ }
+ }
+ /* Time to free the main structure */
+ erts_db_free(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ (void *) tb->free_stack_elems.array,
+ sizeof(TreeDbTerm *) * STACK_NEED);
+ erts_db_free(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ (void *) tb->free_stack_rnodes.array,
+ sizeof(DbTableCATreeNode *) * STACK_NEED);
+ return 1;
+}
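+
+/*
+ * The incremental free above runs in two phases: first the routing nodes
+ * are freed (do_free_routing_nodes_catree_cont), then the base nodes and
+ * their element trees are freed one at a time from the
+ * base_nodes_to_free_list (do_free_base_node_cont). A negative return
+ * value means the reduction budget ran out and the caller has to call
+ * again; db_free_table_catree above forces completion in one go by
+ * passing ERTS_SWORD_MAX as the budget.
+ */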
+
+static
+int db_catree_nr_of_items_deleted_wb_dtor(Binary *context_bin) {
+ (void)context_bin;
+ return 1;
+}
+
+typedef struct {
+ Uint nr_of_deleted_items;
+} DbCATreeNrOfItemsDeletedWb;
+
+static Eterm
+create_and_install_num_of_deleted_items_wb_bin(Process *p, DbTableCATree *tb)
+{
+ Binary* bin =
+ erts_create_magic_binary(sizeof(DbCATreeNrOfItemsDeletedWb),
+ db_catree_nr_of_items_deleted_wb_dtor);
+ DbCATreeNrOfItemsDeletedWb* data = ERTS_MAGIC_BIN_DATA(bin);
+ Eterm* hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ Eterm mref = erts_mk_magic_ref(&hp, &MSO(p), bin);
+ data->nr_of_deleted_items = 0;
+ tb->nr_of_deleted_items_wb = bin;
+ erts_refc_inctest(&bin->intern.refc, 2);
+ return mref;
+}
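+
+/*
+ * The magic binary created above is a write-back slot for the number of
+ * deleted items: one reference is held by the magic ref on the caller's
+ * heap and one by tb->nr_of_deleted_items_wb, hence the reference count
+ * of (at least) two. The table's reference is released again in
+ * db_delete_all_objects_catree below, once the final count has been
+ * written back.
+ */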
+
+static Eterm db_delete_all_objects_get_nitems_from_holder_catree(Process* p,
+ Eterm mref)
+{
+ Binary* bin = erts_magic_ref2bin(mref);
+ DbCATreeNrOfItemsDeletedWb* data = ERTS_MAGIC_BIN_DATA(bin);
+ return erts_make_integer(data->nr_of_deleted_items, p);
+}
+
+static SWord db_delete_all_objects_catree(Process* p,
+ DbTable* tbl,
+ SWord reds,
+ Eterm* nitems_holder_wb)
+{
+ DbTableCATree *tb = &tbl->catree;
+ DbCATreeNrOfItemsDeletedWb* data;
+ if (!tb->deletion) {
+ *nitems_holder_wb =
+ create_and_install_num_of_deleted_items_wb_bin(p, tb);
+ }
+ reds = db_free_table_continue_catree(tbl, reds);
+ if (reds < 0)
+ return reds;
+ data = ERTS_MAGIC_BIN_DATA(tb->nr_of_deleted_items_wb);
+ data->nr_of_deleted_items = tb->nr_of_deleted_items;
+ erts_bin_release(tb->nr_of_deleted_items_wb);
+ db_create_catree(p, tbl);
+ return reds;
+}
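+
+/*
+ * delete_all_objects on a catree thus reuses the incremental table free:
+ * the first call installs the write-back binary, later calls keep tearing
+ * the tree down while reductions last, and the final call publishes
+ * nr_of_deleted_items and recreates an empty table with db_create_catree().
+ */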
+
+
+static void do_for_route_nodes(DbTableCATreeNode* node,
+ void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ ErlOffHeap tmp_offheap;
+
+ if (!GET_LEFT(node)->is_base_node)
+ do_for_route_nodes(GET_LEFT(node), func, arg);
+
+ tmp_offheap.first = node->u.route.key.oh;
+ tmp_offheap.overhead = 0;
+ (*func)(&tmp_offheap, arg);
+
+ if (!GET_RIGHT(node)->is_base_node)
+ do_for_route_nodes(GET_RIGHT(node), func, arg);
+}
+
+static void db_foreach_offheap_catree(DbTable *tbl,
+ void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ CATreeRootIterator iter;
+ TreeDbTerm** root;
+
+ init_root_iterator(&tbl->catree, &iter, 1);
+ root = catree_find_first_root(&iter);
+ do {
+ db_foreach_offheap_tree_common(*root, func, arg);
+ root = catree_find_next_root(&iter, NULL);
+ } while (root);
+ destroy_root_iterator(&iter);
+
+ do_for_route_nodes(GET_ROOT(&tbl->catree), func, arg);
+}
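+
+/*
+ * The root iterator above only visits the base nodes' element trees;
+ * route nodes can hold off-heap data of their own in the copied route
+ * keys (DbRouteKey.oh), which is why do_for_route_nodes() walks the
+ * routing layer separately.
+ */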
+
+static int db_lookup_dbterm_catree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle *handle)
+{
+ DbTableCATree *tb = &tbl->catree;
+ FindBaseNode fbn;
+ DbTableCATreeNode* node = find_wlock_valid_base_node(tb, key, &fbn);
+ int res = db_lookup_dbterm_tree_common(p, tbl, &node->u.base.root, key,
+ obj, handle, NULL);
+ if (res == 0) {
+ wunlock_adapt_base_node(tb, node, fbn.parent, fbn.current_level);
+ } else {
+ /* db_finalize_dbterm_catree will unlock */
+ handle->u.catree.base_node = node;
+ handle->u.catree.parent = fbn.parent;
+ handle->u.catree.current_level = fbn.current_level;
+ }
+ return res;
+}
+
+static void db_finalize_dbterm_catree(int cret, DbUpdateHandle *handle)
+{
+ DbTableCATree *tb = &(handle->tb->catree);
+ db_finalize_dbterm_tree_common(cret, handle, NULL);
+ wunlock_adapt_base_node(tb, handle->u.catree.base_node,
+ handle->u.catree.parent,
+ handle->u.catree.current_level);
+ return;
+}
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+static void erts_lcnt_enable_db_catree_lock_count_helper(DbTableCATree *tb,
+ DbTableCATreeNode *node,
+ int enable)
+{
+ erts_lcnt_ref_t *lcnt_ref;
+ erts_lock_flags_t lock_type;
+ if (node->is_base_node) {
+ lcnt_ref = &GET_BASE_NODE_LOCK(node)->lcnt;
+ lock_type = ERTS_LOCK_TYPE_RWMUTEX;
+ } else {
+ erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_LEFT(node), enable);
+ erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_RIGHT(node), enable);
+ lcnt_ref = &GET_ROUTE_NODE_LOCK(node)->lcnt;
+ lock_type = ERTS_LOCK_TYPE_MUTEX;
+ }
+ if (enable) {
+ erts_lcnt_install_new_lock_info(lcnt_ref, "db_hash_slot", tb->common.the_name,
+ lock_type | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(lcnt_ref);
+ }
+}
+
+void erts_lcnt_enable_db_catree_lock_count(DbTableCATree *tb, int enable)
+{
+ erts_lcnt_enable_db_catree_lock_count_helper(tb, GET_ROOT(tb), enable);
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
+void db_catree_force_split(DbTableCATree* tb, int on)
+{
+ CATreeRootIterator iter;
+ TreeDbTerm** root;
+
+ init_root_iterator(tb, &iter, 1);
+ root = catree_find_first_root(&iter);
+ do {
+ iter.locked_bnode->u.base.lock_statistics = (on ? INT_MAX : 0);
+ root = catree_find_next_root(&iter, NULL);
+ } while (root);
+ destroy_root_iterator(&iter);
+
+ if (on)
+ tb->common.status |= DB_CATREE_FORCE_SPLIT;
+ else
+ tb->common.status &= ~DB_CATREE_FORCE_SPLIT;
+}
+
+void db_catree_debug_random_split_join(DbTableCATree* tb, int on)
+{
+ if (on)
+ tb->common.status |= DB_CATREE_DEBUG_RANDOM_SPLIT_JOIN;
+ else
+ tb->common.status &= ~DB_CATREE_DEBUG_RANDOM_SPLIT_JOIN;
+}
+
+void db_calc_stats_catree(DbTableCATree* tb, DbCATreeStats* stats)
+{
+ DbTableCATreeNode* stack[ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT];
+ DbTableCATreeNode* node;
+ Uint depth = 0;
+
+ stats->route_nodes = 0;
+ stats->base_nodes = 0;
+ stats->max_depth = 0;
+
+ node = GET_ROOT(tb);
+ do {
+ while (!node->is_base_node) {
+ stats->route_nodes++;
+ ASSERT(depth < sizeof(stack)/sizeof(*stack));
+ stack[depth++] = node; /* PUSH parent */
+ if (stats->max_depth < depth)
+ stats->max_depth = depth;
+ node = GET_LEFT(node);
+ }
+ stats->base_nodes++;
+
+ while (depth > 0) {
+ DbTableCATreeNode* parent = stack[depth-1];
+ if (node == GET_LEFT(parent)) {
+ node = GET_RIGHT(parent);
+ break;
+ }
+ else {
+ ASSERT(node == GET_RIGHT(parent));
+ node = parent;
+ depth--; /* POP parent */
+ }
+ }
+ } while (depth > 0);
+}
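+
+/*
+ * The stats walk above is an iterative depth-first traversal with an
+ * explicit parent stack: descend left until a base node is reached, then
+ * pop upwards until a parent is found whose right subtree is still
+ * unvisited. The stack depth is bounded by
+ * ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT, which the ASSERT guards.
+ */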
+
+#ifdef HARDDEBUG
+
+/*
+ * Not called, but kept as it might come in handy
+ */
+static inline int my_check_table_tree(TreeDbTerm *t)
+{
+ int lh, rh;
+ if (t == NULL)
+ return 0;
+ lh = my_check_table_tree(t->left);
+ rh = my_check_table_tree(t->right);
+ if ((rh - lh) != t->balance) {
+ erts_fprintf(stderr, "Invalid tree balance for this node:\n");
+ erts_fprintf(stderr,"balance = %d, left = 0x%08X, right = 0x%08X\n",
+ t->balance, t->left, t->right);
+ erts_fprintf(stderr,"\nDump:\n---------------------------------\n");
+ erts_fprintf(stderr,"\n---------------------------------\n");
+ abort();
+ }
+ return ((rh > lh) ? rh : lh) + 1;
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_db_catree.h b/erts/emulator/beam/erl_db_catree.h
new file mode 100644
index 0000000000..cf3498dabb
--- /dev/null
+++ b/erts/emulator/beam/erl_db_catree.h
@@ -0,0 +1,137 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Implementation of ETS ordered_set table type with
+ * fine-grained synchronization.
+ *
+ * Author: Kjell Winblad
+ *
+ * "erl_db_catree.c" contains more details about the implementation.
+ *
+ */
+
+#ifndef _DB_CATREE_H
+#define _DB_CATREE_H
+
+struct DbTableCATreeNode;
+
+typedef struct {
+ Eterm term;
+ struct erl_off_heap_header* oh;
+ Uint size;
+ Eterm heap[1];
+} DbRouteKey;
+
+typedef struct {
+ erts_rwmtx_t lock; /* The lock for this base node */
+ Sint lock_statistics;
+ int is_valid; /* If this base node is still valid */
+ TreeDbTerm *root; /* The root of the sequential tree */
+ ErtsThrPrgrLaterOp free_item; /* Used when freeing using thread progress */
+ struct DbTableCATreeNode * next; /* Used when gradually deleting */
+
+ char end_of_struct__;
+} DbTableCATreeBaseNode;
+
+typedef struct {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ Sint lc_order;
+#endif
+ ErtsThrPrgrLaterOp free_item; /* Used when freeing using thread progress */
+ erts_mtx_t lock; /* Used when joining route nodes */
+ int is_valid; /* If this route node is still valid */
+ erts_atomic_t left;
+ erts_atomic_t right;
+ DbRouteKey key;
+} DbTableCATreeRouteNode;
+
+typedef struct DbTableCATreeNode {
+ int is_base_node;
+ union {
+ DbTableCATreeRouteNode route;
+ DbTableCATreeBaseNode base;
+ } u;
+} DbTableCATreeNode;
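+
+/* A CA tree node is a tagged union; is_base_node selects the variant.
+ * A typical dispatch, using the accessors defined in erl_db_catree.c,
+ * looks like:
+ *
+ *     if (node->is_base_node)
+ *         walk_tree(node->u.base.root);
+ *     else
+ *         visit(GET_LEFT(node)), visit(GET_RIGHT(node));
+ */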
+
+typedef struct {
+ Uint pos; /* Current position on stack */
+ Uint size; /* The size of the stack array */
+ DbTableCATreeNode** array; /* The stack */
+} CATreeNodeStack;
+
+typedef struct db_table_catree {
+ DbTableCommon common;
+
+ /* CA Tree-specific fields */
+ erts_atomic_t root; /* The tree root (DbTableCATreeNode*) */
+ Uint deletion; /* Being deleted */
+ DbTreeStack free_stack_elems; /* Used for deletion ... */
+ CATreeNodeStack free_stack_rnodes;
+ DbTableCATreeNode *base_nodes_to_free_list;
+ int is_routing_nodes_freed;
+ /* The fields below are used by delete_all_objects and
+ select_delete(DeleteAll) */
+ Uint nr_of_deleted_items;
+ Binary* nr_of_deleted_items_wb;
+} DbTableCATree;
+
+typedef struct {
+ DbTableCATree* tb;
+ Eterm next_route_key;
+ DbTableCATreeNode* locked_bnode;
+ DbTableCATreeNode* bnode_parent;
+ int bnode_level;
+ int read_only;
+ DbRouteKey* search_key;
+} CATreeRootIterator;
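+
+/* The root iterator visits the sequential tree roots of the base nodes
+ * in key order. A typical read-only traversal, as used by the *_catree
+ * functions in erl_db_catree.c (third init argument 1 = read_only):
+ *
+ *     CATreeRootIterator iter;
+ *     init_root_iterator(&tbl->catree, &iter, 1);
+ *     root = catree_find_first_root(&iter);
+ *     while (root) {
+ *         ... use *root ...
+ *         root = catree_find_next_root(&iter, NULL);
+ *     }
+ *     destroy_root_iterator(&iter);
+ */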
+
+
+void db_initialize_catree(void);
+
+int db_create_catree(Process *p, DbTable *tbl);
+
+TreeDbTerm** catree_find_root(Eterm key, CATreeRootIterator*);
+
+TreeDbTerm** catree_find_next_from_pb_key_root(Eterm key, CATreeRootIterator*);
+TreeDbTerm** catree_find_prev_from_pb_key_root(Eterm key, CATreeRootIterator*);
+TreeDbTerm** catree_find_nextprev_root(CATreeRootIterator*, int next, Eterm* keyp);
+TreeDbTerm** catree_find_next_root(CATreeRootIterator*, Eterm* keyp);
+TreeDbTerm** catree_find_prev_root(CATreeRootIterator*, Eterm* keyp);
+TreeDbTerm** catree_find_first_root(CATreeRootIterator*);
+TreeDbTerm** catree_find_last_root(CATreeRootIterator*);
+
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_catree_lock_count(DbTableCATree *tb, int enable);
+#endif
+
+void db_catree_force_split(DbTableCATree*, int on);
+void db_catree_debug_random_split_join(DbTableCATree*, int on);
+
+typedef struct {
+ Uint route_nodes;
+ Uint base_nodes;
+ Uint max_depth;
+} DbCATreeStats;
+void db_calc_stats_catree(DbTableCATree*, DbCATreeStats*);
+
+
+#endif /* _DB_CATREE_H */
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 42d7909a08..ceaccf7e44 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -85,6 +85,14 @@
#include "erl_db_hash.h"
+#define ADD_NITEMS(DB, TO_ADD) \
+ erts_flxctr_add(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID, TO_ADD)
+#define INC_NITEMS(DB) \
+ erts_flxctr_inc_read_centralized(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+#define DEC_NITEMS(DB) \
+ erts_flxctr_dec_read_centralized(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+#define RESET_NITEMS(DB) \
+ erts_flxctr_reset(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
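+
+/*
+ * The item counter now lives in an erts_flxctr (see erl_flxctr.h) instead
+ * of a plain erts_atomic field; these wrappers keep the call sites below
+ * readable. The *_read_centralized variants assume the counter has not
+ * been switched to its decentralized representation.
+ */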
/*
* The following symbols can be manipulated to "tune" the linear hash array
*/
@@ -121,7 +129,9 @@
: ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)))
#endif
#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive))
-#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
+#define NITEMS(tb) \
+ ((Sint)erts_flxctr_read_centralized(&(tb)->common.counters, \
+ ERTS_DB_TABLE_NITEMS_COUNTER_ID))
#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
@@ -404,26 +414,31 @@ static int db_slot_hash(Process *p, DbTable *tbl,
static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
- int reverse, Eterm *ret);
+ int reverse, Eterm *ret, enum DbIterSafety);
static int db_select_hash(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, int reverse, Eterm *ret);
+ Eterm pattern, int reverse, Eterm *ret,
+ enum DbIterSafety);
static int db_select_continue_hash(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret);
+ Eterm pattern, Eterm *ret, enum DbIterSafety);
static int db_select_count_continue_hash(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
-
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret);
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety);
static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret);
+ Eterm pattern, Eterm *ret, enum DbIterSafety);
static int db_select_replace_continue_hash(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
static int db_take_hash(Process *, DbTable *, Eterm, Eterm *);
static void db_print_hash(fmtfn_t to,
@@ -439,7 +454,12 @@ static void db_foreach_offheap_hash(DbTable *,
void (*)(ErlOffHeap *, void *),
void *);
-static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds);
+static SWord db_delete_all_objects_hash(Process* p,
+ DbTable* tbl,
+ SWord reds,
+ Eterm* nitems_holder_wb);
+static Eterm db_delete_all_objects_get_nitems_from_holder_hash(Process* p,
+ Eterm nitems_holder);
#ifdef HARDDEBUG
static void db_check_table_hash(DbTableHash *tb);
#endif
@@ -535,7 +555,7 @@ DbTableMethod db_hash =
db_select_chunk_hash,
db_select_hash,
db_select_delete_hash,
- db_select_continue_hash, /* hmm continue_hash? */
+ db_select_continue_hash,
db_select_delete_continue_hash,
db_select_count_hash,
db_select_count_continue_hash,
@@ -543,6 +563,7 @@ DbTableMethod db_hash =
db_select_replace_continue_hash,
db_take_hash,
db_delete_all_objects_hash,
+ db_delete_all_objects_get_nitems_from_holder_hash,
db_free_empty_table_hash,
db_free_table_continue_hash,
db_print_hash,
@@ -801,7 +822,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
if (tb->common.status & DB_SET) {
HashDbTerm* bnext = b->next;
if (is_pseudo_deleted(b)) {
- erts_atomic_inc_nob(&tb->common.nitems);
+ INC_NITEMS(tb);
b->pseudo_deleted = 0;
}
else if (key_clash_fail) {
@@ -830,7 +851,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
do {
if (db_eq(&tb->common,obj,&q->dbterm)) {
if (is_pseudo_deleted(q)) {
- erts_atomic_inc_nob(&tb->common.nitems);
+ INC_NITEMS(tb);
q->pseudo_deleted = 0;
ASSERT(q->hvalue == hval);
if (q != b) { /* must move to preserve key insertion order */
@@ -853,7 +874,7 @@ Lnew:
q->pseudo_deleted = 0;
q->next = b;
*bp = q;
- nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
+ nitems = INC_NITEMS(tb);
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
@@ -1051,7 +1072,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ ADD_NITEMS(tb, nitems_diff);
try_shrink(tb);
}
free_term_list(tb, free_us);
@@ -1112,7 +1133,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ ADD_NITEMS(tb, nitems_diff);
try_shrink(tb);
}
free_term_list(tb, free_us);
@@ -1154,8 +1175,9 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
* Match traversal callbacks
*/
-typedef struct match_callbacks_t_ match_callbacks_t;
-struct match_callbacks_t_
+
+typedef struct traverse_context_t_ traverse_context_t;
+struct traverse_context_t_
{
/* Called when no match is possible.
* context_ptr: Pointer to context
@@ -1163,7 +1185,7 @@ struct match_callbacks_t_
*
* Both the direct return value and 'ret' are used as the traversal function return values.
*/
- int (*on_nothing_can_match)(match_callbacks_t* ctx, Eterm* ret);
+ int (*on_nothing_can_match)(traverse_context_t* ctx, Eterm* ret);
/* Called for each match result.
* context_ptr: Pointer to context
@@ -1174,7 +1196,7 @@ struct match_callbacks_t_
*
* Should return 1 for successful match, 0 otherwise.
*/
- int (*on_match_res)(match_callbacks_t* ctx, Sint slot_ix,
+ int (*on_match_res)(traverse_context_t* ctx, Sint slot_ix,
HashDbTerm*** current_ptr_ptr, Eterm match_res);
/* Called when either we've matched enough elements in this cycle or EOT was reached.
@@ -1188,7 +1210,7 @@ struct match_callbacks_t_
* Both the direct return value and 'ret' are used as the traversal function return values.
* If *mpp is set to NULL, it won't be deallocated (useful for trapping.)
*/
- int (*on_loop_ended)(match_callbacks_t* ctx, Sint slot_ix, Sint got,
+ int (*on_loop_ended)(traverse_context_t* ctx, Sint slot_ix, Sint got,
Sint iterations_left, Binary** mpp, Eterm* ret);
/* Called when it's time to trap
@@ -1201,16 +1223,21 @@ struct match_callbacks_t_
* Both the direct return value and 'ret' are used as the traversal function return values.
* If *mpp is set to NULL, it won't be deallocated (useful for trapping.)
*/
- int (*on_trap)(match_callbacks_t* ctx, Sint slot_ix, Sint got, Binary** mpp,
+ int (*on_trap)(traverse_context_t* ctx, Sint slot_ix, Sint got, Binary** mpp,
Eterm* ret);
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* prev_continuation_tptr;
+ enum DbIterSafety safety;
};
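+
+/*
+ * The fields that used to be duplicated in every select_*_context_t
+ * (p, tb, tid, prev_continuation_tptr) now live in traverse_context_t
+ * itself, together with the iteration-safety class, so helpers such as
+ * on_simple_trap() only need this one pointer. Specialised contexts embed
+ * it as their first member:
+ *
+ *     typedef struct {
+ *         traverse_context_t base;
+ *         ... extra per-operation fields ...
+ *     } select_chunk_context_t;
+ */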
/*
* Begin hash table match traversal
*/
-static int match_traverse(Process* p, DbTableHash* tb,
+static int match_traverse(traverse_context_t* ctx,
Eterm pattern,
extra_match_validator_t extra_match_validator, /* Optional */
Sint chunk_size, /* If 0, no chunking */
@@ -1218,9 +1245,9 @@ static int match_traverse(Process* p, DbTableHash* tb,
Eterm** hpp, /* Heap */
int lock_for_write, /* Set to 1 if we're going to delete or
modify existing terms */
- match_callbacks_t* ctx,
Eterm* ret)
{
+ DbTableHash* tb = ctx->tb;
Sint slot_ix; /* Slot index */
HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
* the 'next' pointer in the previous term
@@ -1287,7 +1314,7 @@ static int match_traverse(Process* p, DbTableHash* tb,
for(;;) {
if (*current_ptr != NULL) {
if (!is_pseudo_deleted(*current_ptr)) {
- match_res = db_match_dbterm(&tb->common, p, mpi.mp,
+ match_res = db_match_dbterm(&tb->common, ctx->p, mpi.mp,
&(*current_ptr)->dbterm, hpp, 2);
saved_current = *current_ptr;
if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
@@ -1352,7 +1379,7 @@ done:
/*
* Continue hash table match traversal
*/
-static int match_traverse_continue(Process* p, DbTableHash* tb,
+static int match_traverse_continue(traverse_context_t* ctx,
Sint chunk_size, /* If 0, no chunking */
Sint iterations_left, /* Nr. of iterations left */
Eterm** hpp, /* Heap */
@@ -1361,9 +1388,9 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
Binary** mpp, /* Existing match program */
int lock_for_write, /* Set to 1 if we're going to delete or
modify existing terms */
- match_callbacks_t* ctx,
Eterm* ret)
{
+ DbTableHash* tb = ctx->tb;
HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
* the 'next' pointer in the previous term
*/
@@ -1406,7 +1433,7 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
for(;;) {
if (*current_ptr != NULL) {
if (!is_pseudo_deleted(*current_ptr)) {
- match_res = db_match_dbterm(&tb->common, p, *mpp,
+ match_res = db_match_dbterm(&tb->common, ctx->p, *mpp,
&(*current_ptr)->dbterm, hpp, 2);
saved_current = *current_ptr;
if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
@@ -1456,52 +1483,50 @@ done:
*/
static ERTS_INLINE int on_simple_trap(Export* trap_function,
- Process* p,
- DbTableHash* tb,
- Eterm tid,
- Eterm* prev_continuation_tptr,
- Sint slot_ix,
- Sint got,
- Binary** mpp,
- Eterm* ret)
+ traverse_context_t* ctx,
+ Sint slot_ix,
+ Sint got,
+ Binary** mpp,
+ Eterm* ret)
{
Eterm* hp;
Eterm egot;
Eterm mpb;
Eterm continuation;
- int is_first_trap = (prev_continuation_tptr == NULL);
+ int is_first_trap = (ctx->prev_continuation_tptr == NULL);
size_t base_halloc_sz = (is_first_trap ? ERTS_MAGIC_REF_THING_SIZE : 0);
- BUMP_ALL_REDS(p);
+ BUMP_ALL_REDS(ctx->p);
if (IS_USMALL(0, got)) {
- hp = HAllocX(p, base_halloc_sz + 5, ERTS_MAGIC_REF_THING_SIZE);
+ hp = HAllocX(ctx->p, base_halloc_sz + 6, ERTS_MAGIC_REF_THING_SIZE);
egot = make_small(got);
}
else {
- hp = HAllocX(p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 5,
+ hp = HAllocX(ctx->p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 6,
ERTS_MAGIC_REF_THING_SIZE);
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
if (is_first_trap) {
- if (is_atom(tid))
- tid = erts_db_make_tid(p, &tb->common);
- mpb = erts_db_make_match_prog_ref(p, *mpp, &hp);
+ if (is_atom(ctx->tid))
+ ctx->tid = erts_db_make_tid(ctx->p, &ctx->tb->common);
+ mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &hp);
*mpp = NULL; /* otherwise the caller will destroy it */
}
else {
- ASSERT(!is_atom(tid));
- mpb = prev_continuation_tptr[3];
+ ASSERT(!is_atom(ctx->tid));
+ mpb = ctx->prev_continuation_tptr[3];
}
- continuation = TUPLE4(
+ continuation = TUPLE5(
hp,
- tid,
+ ctx->tid,
make_small(slot_ix),
mpb,
- egot);
- ERTS_BIF_PREP_TRAP1(*ret, trap_function, p, continuation);
+ egot,
+ make_small(ctx->safety));
+ ERTS_BIF_PREP_TRAP1(*ret, trap_function, ctx->p, continuation);
return DB_ERROR_NONE;
}
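+
+/*
+ * The simple continuation built above is now a 5-tuple,
+ * {TableRef, SlotIx, MatchProgRef, Got, Safety}; the extra element (and
+ * the heap need of 6 words instead of 5) carries the DbIterSafety value
+ * across the trap so that unpack_simple_continuation() below can restore
+ * it.
+ */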
@@ -1510,17 +1535,18 @@ static ERTS_INLINE int unpack_simple_continuation(Eterm continuation,
Eterm* tid_ptr,
Sint* slot_ix_p,
Binary** mpp,
- Sint* got_p)
+ Sint* got_p,
+ enum DbIterSafety* safety_p)
{
Eterm* tptr;
ASSERT(is_tuple(continuation));
tptr = tuple_val(continuation);
- if (arityval(*tptr) != 4)
+ if (*tptr != make_arityval(5))
return 1;
- if (! is_small(tptr[2]) || !(is_big(tptr[4]) || is_small(tptr[4]))) {
+ if (!is_small(tptr[2]) || !(is_big(tptr[4]) || is_small(tptr[4]))
+ || !is_small(tptr[5]))
return 1;
- }
*tptr_ptr = tptr;
*tid_ptr = tptr[1];
@@ -1532,6 +1558,7 @@ static ERTS_INLINE int unpack_simple_continuation(Eterm continuation,
else {
*got_p = unsigned_val(tptr[4]);
}
+ *safety_p = signed_val(tptr[5]);
return 0;
}
@@ -1545,24 +1572,20 @@ static ERTS_INLINE int unpack_simple_continuation(Eterm continuation,
#define MAX_SELECT_CHUNK_ITERATIONS 1000
typedef struct {
- match_callbacks_t base;
- Process* p;
- DbTableHash* tb;
- Eterm tid;
+ traverse_context_t base;
Eterm* hp;
Sint chunk_size;
Eterm match_list;
- Eterm* prev_continuation_tptr;
} select_chunk_context_t;
-static int select_chunk_on_nothing_can_match(match_callbacks_t* ctx_base, Eterm* ret)
+static int select_chunk_on_nothing_can_match(traverse_context_t* ctx_base, Eterm* ret)
{
select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
*ret = (ctx->chunk_size > 0 ? am_EOT : NIL);
return DB_ERROR_NONE;
}
-static int select_chunk_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+static int select_chunk_on_match_res(traverse_context_t* ctx_base, Sint slot_ix,
HashDbTerm*** current_ptr_ptr,
Eterm match_res)
{
@@ -1574,7 +1597,7 @@ static int select_chunk_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
return 0;
}
-static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
+static int select_chunk_on_loop_ended(traverse_context_t* ctx_base,
Sint slot_ix, Sint got,
Sint iterations_left, Binary** mpp,
Eterm* ret)
@@ -1590,7 +1613,7 @@ static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
}
else {
ASSERT(iterations_left < MAX_SELECT_CHUNK_ITERATIONS);
- BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ BUMP_REDS(ctx->base.p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
if (ctx->chunk_size) {
Eterm continuation;
Eterm rest = NIL;
@@ -1609,14 +1632,14 @@ static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
been in 'user space' */
}
if (rest != NIL || slot_ix >= 0) { /* Need more calls */
- Eterm tid = ctx->tid;
- ctx->hp = HAllocX(ctx->p,
+ Eterm tid = ctx->base.tid;
+ ctx->hp = HAllocX(ctx->base.p,
3 + 7 + ERTS_MAGIC_REF_THING_SIZE,
ERTS_MAGIC_REF_THING_SIZE);
- mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &ctx->hp);
+ mpb = erts_db_make_match_prog_ref(ctx->base.p, *mpp, &ctx->hp);
if (is_atom(tid))
- tid = erts_db_make_tid(ctx->p,
- &ctx->tb->common);
+ tid = erts_db_make_tid(ctx->base.p,
+ &ctx->base.tb->common);
continuation = TUPLE6(
ctx->hp,
tid,
@@ -1631,7 +1654,7 @@ static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
} else { /* All data is exhausted */
if (ctx->match_list != NIL) { /* No more data to search but still a
result to return to the caller */
- ctx->hp = HAlloc(ctx->p, 3);
+ ctx->hp = HAlloc(ctx->base.p, 3);
*ret = TUPLE2(ctx->hp, ctx->match_list, am_EOT);
return DB_ERROR_NONE;
} else { /* Reached the end of the table with no data to return */
@@ -1645,7 +1668,7 @@ static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
}
}
-static int select_chunk_on_trap(match_callbacks_t* ctx_base,
+static int select_chunk_on_trap(traverse_context_t* ctx_base,
Sint slot_ix, Sint got,
Binary** mpp, Eterm* ret)
{
@@ -1654,74 +1677,77 @@ static int select_chunk_on_trap(match_callbacks_t* ctx_base,
Eterm continuation;
Eterm* hp;
- BUMP_ALL_REDS(ctx->p);
+ BUMP_ALL_REDS(ctx->base.p);
- if (ctx->prev_continuation_tptr == NULL) {
- Eterm tid = ctx->tid;
+ if (ctx->base.prev_continuation_tptr == NULL) {
+ Eterm tid = ctx->base.tid;
/* First time we're trapping */
- hp = HAllocX(ctx->p, 7 + ERTS_MAGIC_REF_THING_SIZE,
+ hp = HAllocX(ctx->base.p, 8 + ERTS_MAGIC_REF_THING_SIZE,
ERTS_MAGIC_REF_THING_SIZE);
if (is_atom(tid))
- tid = erts_db_make_tid(ctx->p, &ctx->tb->common);
- mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &hp);
- continuation = TUPLE6(
+ tid = erts_db_make_tid(ctx->base.p, &ctx->base.tb->common);
+ mpb = erts_db_make_match_prog_ref(ctx->base.p, *mpp, &hp);
+ continuation = TUPLE7(
hp,
tid,
make_small(slot_ix),
make_small(ctx->chunk_size),
mpb,
ctx->match_list,
- make_small(got));
+ make_small(got),
+ make_small(ctx->base.safety));
*mpp = NULL; /* otherwise the caller will destroy it */
}
else {
/* Not the first time we're trapping; reuse continuation terms */
- hp = HAlloc(ctx->p, 7);
- continuation = TUPLE6(
+ hp = HAlloc(ctx->base.p, 8);
+ continuation = TUPLE7(
hp,
- ctx->prev_continuation_tptr[1],
+ ctx->base.prev_continuation_tptr[1],
make_small(slot_ix),
- ctx->prev_continuation_tptr[3],
- ctx->prev_continuation_tptr[4],
+ ctx->base.prev_continuation_tptr[3],
+ ctx->base.prev_continuation_tptr[4],
ctx->match_list,
- make_small(got));
+ make_small(got),
+ make_small(ctx->base.safety));
}
- ERTS_BIF_PREP_TRAP1(*ret, &ets_select_continue_exp, ctx->p,
+ ERTS_BIF_PREP_TRAP1(*ret, &ets_select_continue_exp, ctx->base.p,
continuation);
return DB_ERROR_NONE;
}
static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern,
- int reverse, Eterm *ret)
+ int reverse, Eterm *ret, enum DbIterSafety safety)
{
- return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret);
+ return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret, safety);
}
static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
- int reverse, Eterm *ret)
+ int reverse, Eterm *ret, enum DbIterSafety safety)
{
select_chunk_context_t ctx;
ctx.base.on_nothing_can_match = select_chunk_on_nothing_can_match;
ctx.base.on_match_res = select_chunk_on_match_res;
ctx.base.on_loop_ended = select_chunk_on_loop_ended;
- ctx.base.on_trap = select_chunk_on_trap,
- ctx.p = p;
- ctx.tb = &tbl->hash;
- ctx.tid = tid;
+ ctx.base.on_trap = select_chunk_on_trap;
+ ctx.base.p = p;
+ ctx.base.tb = &tbl->hash;
+ ctx.base.tid = tid;
+ ctx.base.prev_continuation_tptr = NULL;
+ ctx.base.safety = safety;
ctx.hp = NULL;
ctx.chunk_size = chunk_size;
ctx.match_list = NIL;
- ctx.prev_continuation_tptr = NULL;
return match_traverse(
- ctx.p, ctx.tb,
+ &ctx.base,
pattern, NULL,
ctx.chunk_size,
MAX_SELECT_CHUNK_ITERATIONS,
&ctx.hp, 0,
- &ctx.base, ret);
+ ret);
}
/*
@@ -1731,7 +1757,7 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
*/
static
-int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
+int select_chunk_continue_on_loop_ended(traverse_context_t* ctx_base,
Sint slot_ix, Sint got,
Sint iterations_left, Binary** mpp,
Eterm* ret)
@@ -1742,14 +1768,14 @@ int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
Eterm* hp;
ASSERT(iterations_left <= MAX_SELECT_CHUNK_ITERATIONS);
- BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ BUMP_REDS(ctx->base.p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
if (ctx->chunk_size) {
Sint rest_size = 0;
if (got > ctx->chunk_size) {
/* Cannot write destructively here,
the list may have
been in user space */
- hp = HAlloc(ctx->p, (got - ctx->chunk_size) * 2);
+ hp = HAlloc(ctx->base.p, (got - ctx->chunk_size) * 2);
while (got-- > ctx->chunk_size) {
rest = CONS(hp, CAR(list_val(ctx->match_list)), rest);
hp += 2;
@@ -1758,13 +1784,13 @@ int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
}
}
if (rest != NIL || slot_ix >= 0) {
- hp = HAlloc(ctx->p, 3 + 7);
+ hp = HAlloc(ctx->base.p, 3 + 7);
continuation = TUPLE6(
hp,
- ctx->prev_continuation_tptr[1],
+ ctx->base.prev_continuation_tptr[1],
make_small(slot_ix),
- ctx->prev_continuation_tptr[3],
- ctx->prev_continuation_tptr[4],
+ ctx->base.prev_continuation_tptr[3],
+ ctx->base.prev_continuation_tptr[4],
rest,
make_small(rest_size));
hp += 7;
@@ -1772,7 +1798,7 @@ int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
return DB_ERROR_NONE;
} else {
if (ctx->match_list != NIL) {
- hp = HAlloc(ctx->p, 3);
+ hp = HAlloc(ctx->base.p, 3);
*ret = TUPLE2(hp, ctx->match_list, am_EOT);
return DB_ERROR_NONE;
} else {
@@ -1786,10 +1812,11 @@ int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
}
/*
- * This is called when select traps
+ * This is called when ets:select/1/2/3 traps
+ * and for ets:select/1 with a user continuation term.
*/
static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
- Eterm* ret)
+ Eterm* ret, enum DbIterSafety* safety_p)
{
select_chunk_context_t ctx;
Eterm* tptr;
@@ -1805,7 +1832,13 @@ static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
ASSERT(is_tuple(continuation));
tptr = tuple_val(continuation);
- if (arityval(*tptr) != 6)
+ /*
+ * 6-tuple is select/1 user continuation term
+ * 7-tuple is select trap continuation
+ */
+ if (*tptr == make_arityval(7) && is_small(tptr[7]))
+ *safety_p = signed_val(tptr[7]);
+ else if (*tptr != make_arityval(6))
goto badparam;
if (!is_small(tptr[2]) || !is_small(tptr[3]) ||
@@ -1829,18 +1862,19 @@ static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
ctx.base.on_match_res = select_chunk_on_match_res;
ctx.base.on_loop_ended = select_chunk_continue_on_loop_ended;
ctx.base.on_trap = select_chunk_on_trap;
- ctx.p = p;
- ctx.tb = &tbl->hash;
- ctx.tid = tid;
+ ctx.base.p = p;
+ ctx.base.tb = &tbl->hash;
+ ctx.base.tid = tid;
+ ctx.base.prev_continuation_tptr = tptr;
+ ctx.base.safety = *safety_p;
ctx.hp = NULL;
ctx.chunk_size = chunk_size;
ctx.match_list = match_list;
- ctx.prev_continuation_tptr = tptr;
return match_traverse_continue(
- ctx.p, ctx.tb, ctx.chunk_size,
- iterations_left, &ctx.hp, slot_ix, got, &mp, 0,
- &ctx.base, ret);
+ &ctx.base, ctx.chunk_size,
+ iterations_left, &ctx.hp, slot_ix, got, &mp, 0,
+ ret);
badparam:
*ret = NIL;
@@ -1858,84 +1892,73 @@ badparam:
#define MAX_SELECT_COUNT_ITERATIONS 1000
-typedef struct {
- match_callbacks_t base;
- Process* p;
- DbTableHash* tb;
- Eterm tid;
- Eterm* prev_continuation_tptr;
-} select_count_context_t;
-
-static int select_count_on_nothing_can_match(match_callbacks_t* ctx_base,
+static int select_count_on_nothing_can_match(traverse_context_t* ctx_base,
Eterm* ret)
{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int select_count_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+static int select_count_on_match_res(traverse_context_t* ctx_base, Sint slot_ix,
HashDbTerm*** current_ptr_ptr,
Eterm match_res)
{
return (match_res == am_true);
}
-static int select_count_on_loop_ended(match_callbacks_t* ctx_base,
+static int select_count_on_loop_ended(traverse_context_t* ctx,
Sint slot_ix, Sint got,
Sint iterations_left, Binary** mpp,
Eterm* ret)
{
- select_count_context_t* ctx = (select_count_context_t*) ctx_base;
ASSERT(iterations_left <= MAX_SELECT_COUNT_ITERATIONS);
BUMP_REDS(ctx->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
*ret = erts_make_integer(got, ctx->p);
return DB_ERROR_NONE;
}
-static int select_count_on_trap(match_callbacks_t* ctx_base,
+static int select_count_on_trap(traverse_context_t* ctx,
Sint slot_ix, Sint got,
Binary** mpp, Eterm* ret)
{
- select_count_context_t* ctx = (select_count_context_t*) ctx_base;
return on_simple_trap(
- &ets_select_count_continue_exp,
- ctx->p,
- ctx->tb,
- ctx->tid,
- ctx->prev_continuation_tptr,
+ &ets_select_count_continue_exp, ctx,
slot_ix, got, mpp, ret);
}
static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
{
- select_count_context_t ctx;
+ traverse_context_t ctx;
Sint iterations_left = MAX_SELECT_COUNT_ITERATIONS;
Sint chunk_size = 0;
- ctx.base.on_nothing_can_match = select_count_on_nothing_can_match;
- ctx.base.on_match_res = select_count_on_match_res;
- ctx.base.on_loop_ended = select_count_on_loop_ended;
- ctx.base.on_trap = select_count_on_trap;
+ ctx.on_nothing_can_match = select_count_on_nothing_can_match;
+ ctx.on_match_res = select_count_on_match_res;
+ ctx.on_loop_ended = select_count_on_loop_ended;
+ ctx.on_trap = select_count_on_trap;
ctx.p = p;
ctx.tb = &tbl->hash;
ctx.tid = tid;
ctx.prev_continuation_tptr = NULL;
+ ctx.safety = safety;
return match_traverse(
- ctx.p, ctx.tb,
+ &ctx,
pattern, NULL,
chunk_size, iterations_left, NULL, 0,
- &ctx.base, ret);
+ ret);
}
/*
* This is called when select_count traps
*/
static int db_select_count_continue_hash(Process* p, DbTable* tbl,
- Eterm continuation, Eterm* ret)
+ Eterm continuation, Eterm* ret,
+ enum DbIterSafety* safety_p)
{
- select_count_context_t ctx;
+ traverse_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -1944,24 +1967,26 @@ static int db_select_count_continue_hash(Process* p, DbTable* tbl,
Sint chunk_size = 0;
*ret = NIL;
- if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp,
+ &got, safety_p)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
- ctx.base.on_match_res = select_count_on_match_res;
- ctx.base.on_loop_ended = select_count_on_loop_ended;
- ctx.base.on_trap = select_count_on_trap;
+ ctx.on_match_res = select_count_on_match_res;
+ ctx.on_loop_ended = select_count_on_loop_ended;
+ ctx.on_trap = select_count_on_trap;
ctx.p = p;
ctx.tb = &tbl->hash;
ctx.tid = tid;
ctx.prev_continuation_tptr = tptr;
+ ctx.safety = *safety_p;
return match_traverse_continue(
- ctx.p, ctx.tb, chunk_size,
+ &ctx, chunk_size,
MAX_SELECT_COUNT_ITERATIONS,
NULL, slot_ix, got, &mp, 0,
- &ctx.base, ret);
+ ret);
}
#undef MAX_SELECT_COUNT_ITERATIONS
@@ -1976,24 +2001,20 @@ static int db_select_count_continue_hash(Process* p, DbTable* tbl,
#define MAX_SELECT_DELETE_ITERATIONS 1000
typedef struct {
- match_callbacks_t base;
- Process* p;
- DbTableHash* tb;
- Eterm tid;
- Eterm* prev_continuation_tptr;
+ traverse_context_t base;
erts_aint_t fixated_by_me;
Uint last_pseudo_delete;
HashDbTerm* free_us;
} select_delete_context_t;
-static int select_delete_on_nothing_can_match(match_callbacks_t* ctx_base,
+static int select_delete_on_nothing_can_match(traverse_context_t* ctx_base,
Eterm* ret)
{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int select_delete_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+static int select_delete_on_match_res(traverse_context_t* ctx_base, Sint slot_ix,
HashDbTerm*** current_ptr_ptr,
Eterm match_res)
{
@@ -2003,9 +2024,9 @@ static int select_delete_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
if (match_res != am_true)
return 0;
- if (NFIXED(ctx->tb) > ctx->fixated_by_me) { /* fixated by others? */
+ if (NFIXED(ctx->base.tb) > ctx->fixated_by_me) { /* fixated by others? */
if (slot_ix != ctx->last_pseudo_delete) {
- if (!add_fixed_deletion(ctx->tb, slot_ix, ctx->fixated_by_me))
+ if (!add_fixed_deletion(ctx->base.tb, slot_ix, ctx->fixated_by_me))
goto do_erase;
ctx->last_pseudo_delete = slot_ix;
}
@@ -2018,46 +2039,43 @@ static int select_delete_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
del->next = ctx->free_us;
ctx->free_us = del;
}
- erts_atomic_dec_nob(&ctx->tb->common.nitems);
+ DEC_NITEMS(ctx->base.tb);
return 1;
}
-static int select_delete_on_loop_ended(match_callbacks_t* ctx_base,
+static int select_delete_on_loop_ended(traverse_context_t* ctx_base,
Sint slot_ix, Sint got,
Sint iterations_left, Binary** mpp,
Eterm* ret)
{
select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
- free_term_list(ctx->tb, ctx->free_us);
+ free_term_list(ctx->base.tb, ctx->free_us);
ctx->free_us = NULL;
ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS);
- BUMP_REDS(ctx->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
+ BUMP_REDS(ctx->base.p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
if (got) {
- try_shrink(ctx->tb);
+ try_shrink(ctx->base.tb);
}
- *ret = erts_make_integer(got, ctx->p);
+ *ret = erts_make_integer(got, ctx->base.p);
return DB_ERROR_NONE;
}
-static int select_delete_on_trap(match_callbacks_t* ctx_base,
+static int select_delete_on_trap(traverse_context_t* ctx_base,
Sint slot_ix, Sint got,
Binary** mpp, Eterm* ret)
{
select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
- free_term_list(ctx->tb, ctx->free_us);
+ free_term_list(ctx->base.tb, ctx->free_us);
ctx->free_us = NULL;
return on_simple_trap(
- &ets_select_delete_continue_exp,
- ctx->p,
- ctx->tb,
- ctx->tid,
- ctx->prev_continuation_tptr,
+ &ets_select_delete_continue_exp, &ctx->base,
slot_ix, got, mpp, ret);
}
static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
{
select_delete_context_t ctx;
Sint chunk_size = 0;
@@ -2066,27 +2084,29 @@ static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
ctx.base.on_match_res = select_delete_on_match_res;
ctx.base.on_loop_ended = select_delete_on_loop_ended;
ctx.base.on_trap = select_delete_on_trap;
- ctx.p = p;
- ctx.tb = &tbl->hash;
- ctx.tid = tid;
- ctx.prev_continuation_tptr = NULL;
- ctx.fixated_by_me = ctx.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
+ ctx.base.p = p;
+ ctx.base.tb = &tbl->hash;
+ ctx.base.tid = tid;
+ ctx.base.prev_continuation_tptr = NULL;
+ ctx.base.safety = safety;
+ ctx.fixated_by_me = ctx.base.tb->common.is_thread_safe ? 0 : 1;
ctx.last_pseudo_delete = (Uint) -1;
ctx.free_us = NULL;
return match_traverse(
- ctx.p, ctx.tb,
+ &ctx.base,
pattern, NULL,
chunk_size,
MAX_SELECT_DELETE_ITERATIONS, NULL, 1,
- &ctx.base, ret);
+ ret);
}
/*
* This is called when select_delete traps
*/
static int db_select_delete_continue_hash(Process* p, DbTable* tbl,
- Eterm continuation, Eterm* ret)
+ Eterm continuation, Eterm* ret,
+ enum DbIterSafety* safety_p)
{
select_delete_context_t ctx;
Eterm* tptr;
@@ -2096,7 +2116,8 @@ static int db_select_delete_continue_hash(Process* p, DbTable* tbl,
Sint slot_ix;
Sint chunk_size = 0;
- if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp,
+ &got, safety_p)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
@@ -2104,19 +2125,20 @@ static int db_select_delete_continue_hash(Process* p, DbTable* tbl,
ctx.base.on_match_res = select_delete_on_match_res;
ctx.base.on_loop_ended = select_delete_on_loop_ended;
ctx.base.on_trap = select_delete_on_trap;
- ctx.p = p;
- ctx.tb = &tbl->hash;
- ctx.tid = tid;
- ctx.prev_continuation_tptr = tptr;
- ctx.fixated_by_me = ONLY_WRITER(p, ctx.tb) ? 0 : 1; /* TODO: something nicer */
+ ctx.base.p = p;
+ ctx.base.tb = &tbl->hash;
+ ctx.base.tid = tid;
+ ctx.base.prev_continuation_tptr = tptr;
+ ctx.base.safety = *safety_p;
+ ctx.fixated_by_me = ONLY_WRITER(p, ctx.base.tb) ? 0 : 1;
ctx.last_pseudo_delete = (Uint) -1;
ctx.free_us = NULL;
return match_traverse_continue(
- ctx.p, ctx.tb, chunk_size,
+ &ctx.base, chunk_size,
MAX_SELECT_DELETE_ITERATIONS,
NULL, slot_ix, got, &mp, 1,
- &ctx.base, ret);
+ ret);
}
#undef MAX_SELECT_DELETE_ITERATIONS
@@ -2130,26 +2152,17 @@ static int db_select_delete_continue_hash(Process* p, DbTable* tbl,
#define MAX_SELECT_REPLACE_ITERATIONS 1000
-typedef struct {
- match_callbacks_t base;
- Process* p;
- DbTableHash* tb;
- Eterm tid;
- Eterm* prev_continuation_tptr;
-} select_replace_context_t;
-
-static int select_replace_on_nothing_can_match(match_callbacks_t* ctx_base,
+static int select_replace_on_nothing_can_match(traverse_context_t* ctx_base,
Eterm* ret)
{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int select_replace_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+static int select_replace_on_match_res(traverse_context_t* ctx, Sint slot_ix,
HashDbTerm*** current_ptr_ptr,
Eterm match_res)
{
- select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
DbTableHash* tb = ctx->tb;
HashDbTerm* new;
HashDbTerm* next;
@@ -2175,11 +2188,10 @@ static int select_replace_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix
return 0;
}
-static int select_replace_on_loop_ended(match_callbacks_t* ctx_base, Sint slot_ix,
+static int select_replace_on_loop_ended(traverse_context_t* ctx, Sint slot_ix,
Sint got, Sint iterations_left,
Binary** mpp, Eterm* ret)
{
- select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
ASSERT(iterations_left <= MAX_SELECT_REPLACE_ITERATIONS);
/* the more objects we've replaced, the more reductions we've consumed */
BUMP_REDS(ctx->p,
@@ -2189,23 +2201,20 @@ static int select_replace_on_loop_ended(match_callbacks_t* ctx_base, Sint slot_i
return DB_ERROR_NONE;
}
-static int select_replace_on_trap(match_callbacks_t* ctx_base,
+static int select_replace_on_trap(traverse_context_t* ctx,
Sint slot_ix, Sint got,
Binary** mpp, Eterm* ret)
{
- select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
return on_simple_trap(
- &ets_select_replace_continue_exp,
- ctx->p,
- ctx->tb,
- ctx->tid,
- ctx->prev_continuation_tptr,
+ &ets_select_replace_continue_exp, ctx,
slot_ix, got, mpp, ret);
}
-static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret)
+static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
{
- select_replace_context_t ctx;
+ traverse_context_t ctx;
Sint chunk_size = 0;
/* Bag implementation presented both semantic consistency and performance
issues; it is unsupported for now. */
*/
ASSERT(!(tbl->hash.common.status & DB_BAG));
- ctx.base.on_nothing_can_match = select_replace_on_nothing_can_match;
- ctx.base.on_match_res = select_replace_on_match_res;
- ctx.base.on_loop_ended = select_replace_on_loop_ended;
- ctx.base.on_trap = select_replace_on_trap;
+ ctx.on_nothing_can_match = select_replace_on_nothing_can_match;
+ ctx.on_match_res = select_replace_on_match_res;
+ ctx.on_loop_ended = select_replace_on_loop_ended;
+ ctx.on_trap = select_replace_on_trap;
ctx.p = p;
ctx.tb = &tbl->hash;
ctx.tid = tid;
ctx.prev_continuation_tptr = NULL;
+ ctx.safety = safety;
return match_traverse(
- ctx.p, ctx.tb,
+ &ctx,
pattern, db_match_keeps_key,
chunk_size,
MAX_SELECT_REPLACE_ITERATIONS, NULL, 1,
- &ctx.base, ret);
+ ret);
}
/*
* This is called when select_replace traps
*/
-static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret)
+static int db_select_replace_continue_hash(Process* p, DbTable* tbl,
+ Eterm continuation, Eterm* ret,
+ enum DbIterSafety* safety_p)
{
- select_replace_context_t ctx;
+ traverse_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -2244,25 +2256,27 @@ static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm conti
Sint chunk_size = 0;
*ret = NIL;
- if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp,
+ &got, safety_p)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
/* Proceed */
- ctx.base.on_match_res = select_replace_on_match_res;
- ctx.base.on_loop_ended = select_replace_on_loop_ended;
- ctx.base.on_trap = select_replace_on_trap;
+ ctx.on_match_res = select_replace_on_match_res;
+ ctx.on_loop_ended = select_replace_on_loop_ended;
+ ctx.on_trap = select_replace_on_trap;
ctx.p = p;
ctx.tb = &tbl->hash;
ctx.tid = tid;
ctx.prev_continuation_tptr = tptr;
+ ctx.safety = *safety_p;
return match_traverse_continue(
- ctx.p, ctx.tb, chunk_size,
+ &ctx, chunk_size,
MAX_SELECT_REPLACE_ITERATIONS,
NULL, slot_ix, got, &mp, 1,
- &ctx.base, ret);
+ ret);
}
@@ -2302,7 +2316,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ ADD_NITEMS(tb, nitems_diff);
try_shrink(tb);
}
free_term_list(tb, free_us);
@@ -2362,7 +2376,7 @@ static SWord db_mark_all_deleted_hash(DbTable *tbl, SWord reds)
fixdel->slot = NACTIVE(tb) - 1;
fixdel->all = 1;
fixdel->trap = 0;
- erts_atomic_set_nob(&tb->common.nitems, 0);
+ RESET_NITEMS(tb);
return loops < 0 ? 0 : loops / LOOPS_PER_REDUCTION;
}
@@ -2470,7 +2484,8 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
(void*)tb->locks, sizeof(DbTableHashFineLocks));
tb->locks = NULL;
}
- ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
+ ASSERT(sizeof(DbTable) == erts_flxctr_read_approx(&tb->common.counters,
+ ERTS_DB_TABLE_MEM_COUNTER_ID));
return reds; /* Done */
}
@@ -2723,13 +2738,9 @@ static int free_seg(DbTableHash *tb, int free_records)
* sure no lingering threads are still hanging in BUCKET macro
* with an old segtab pointer.
*/
- Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs);
- ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est));
- ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0);
- erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab,
- est,
- &est->lop,
- sz);
+ erts_schedule_db_free(&tb->common, dealloc_ext_segtab,
+ est, &est->lop,
+ SIZEOF_EXT_SEGTAB(est->nsegs));
}
else
erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
@@ -3086,7 +3097,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
ASSERT(q->hvalue == hval);
q->pseudo_deleted = 0;
*bp = b = q;
- erts_atomic_inc_nob(&tb->common.nitems);
+ INC_NITEMS(tb);
}
HRelease(p, hend, htop);
@@ -3099,7 +3110,7 @@ Ldone:
handle->dbterm = &b->dbterm;
handle->flags = flags;
handle->new_size = b->dbterm.size;
- handle->lck = lck;
+ handle->u.hash.lck = lck;
return 1;
}
@@ -3112,7 +3123,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
DbTableHash *tb = &tbl->hash;
HashDbTerm **bp = (HashDbTerm **) handle->bp;
HashDbTerm *b = *bp;
- erts_rwmtx_t* lck = (erts_rwmtx_t*) handle->lck;
+ erts_rwmtx_t* lck = handle->u.hash.lck;
HashDbTerm* free_me = NULL;
ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */
@@ -3129,7 +3140,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
}
WUNLOCK_HASH(lck);
- erts_atomic_dec_nob(&tb->common.nitems);
+ DEC_NITEMS(tb);
try_shrink(tb);
} else {
if (handle->flags & DB_MUST_RESIZE) {
@@ -3138,7 +3149,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
}
if (handle->flags & DB_INC_TRY_GROW) {
int nactive;
- int nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
+ int nitems = INC_NITEMS(tb);
WUNLOCK_HASH(lck);
nactive = NACTIVE(tb);
@@ -3159,8 +3170,17 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
return;
}
-static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds)
+static SWord db_delete_all_objects_hash(Process* p,
+ DbTable* tbl,
+ SWord reds,
+ Eterm* nitems_holder_wb)
{
+ if (nitems_holder_wb != NULL) {
+ Uint nr_of_items =
+ erts_flxctr_read_centralized(&tbl->common.counters,
+ ERTS_DB_TABLE_NITEMS_COUNTER_ID);
+ *nitems_holder_wb = erts_make_integer(nr_of_items, p);
+ }
if (IS_FIXED(tbl)) {
reds = db_mark_all_deleted_hash(tbl, reds);
} else {
@@ -3169,11 +3189,16 @@ static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds)
return reds;
db_create_hash(p, tbl);
- erts_atomic_set_nob(&tbl->hash.common.nitems, 0);
+ RESET_NITEMS(tbl);
}
return reds;
}
+static Eterm db_delete_all_objects_get_nitems_from_holder_hash(Process* p,
+ Eterm nitems_holder) {
+ return nitems_holder;
+}
+
void db_foreach_offheap_hash(DbTable *tbl,
void (*func)(ErlOffHeap *, void *),
void * arg)
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 8c5fc0acb2..492ea81b63 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -48,33 +48,23 @@
#include "erl_binary.h"
#include "erl_db_tree.h"
+#include "erl_db_tree_util.h"
#define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos))
-#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
-/*
-** A stack of this size is enough for an AVL tree with more than
-** 0xFFFFFFFF elements. May be subject to change if
-** the datatype of the element counter is changed to a 64 bit integer.
-** The Maximal height of an AVL tree is calculated as:
-** h(n) <= 1.4404 * log(n + 2) - 0.328
-** Where n denotes the number of nodes, h(n) the height of the tree
-** with n nodes and log is the binary logarithm.
-*/
-
-#define STACK_NEED 50
-#define TREE_MAX_ELEMENTS 0xFFFFFFFFUL
-
-#define PUSH_NODE(Dtt, Tdt) \
- ((Dtt)->array[(Dtt)->pos++] = Tdt)
-
-#define POP_NODE(Dtt) \
- (((Dtt)->pos) ? \
- (Dtt)->array[--((Dtt)->pos)] : NULL)
-
-#define TOP_NODE(Dtt) \
- ((Dtt->pos) ? \
- (Dtt)->array[(Dtt)->pos - 1] : NULL)
+#define NITEMS_CENTRALIZED(tb) \
+ ((Sint)erts_flxctr_read_centralized(&(tb)->common.counters, \
+ ERTS_DB_TABLE_NITEMS_COUNTER_ID))
+#define ADD_NITEMS(DB, TO_ADD) \
+ erts_flxctr_add(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID, TO_ADD)
+#define INC_NITEMS(DB) \
+ erts_flxctr_inc(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+#define INC_NITEMS_CENTRALIZED(DB) \
+ erts_flxctr_inc_read_centralized(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+#define RESET_NITEMS(DB) \
+ erts_flxctr_reset(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+#define IS_CENTRALIZED_CTR(tb) (!(tb)->common.counters.is_decentralized)
+#define APPROX_MEM_CONSUMED(tb) erts_flxctr_read_approx(&(tb)->common.counters, ERTS_DB_TABLE_MEM_COUNTER_ID)
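+
+/*
+ * These wrappers mirror the ones in erl_db_hash.c. The _CENTRALIZED
+ * variants read the flxctr directly and are only meaningful while the
+ * counter is centralized (IS_CENTRALIZED_CTR); a decentralized counter
+ * has to be read through the snapshot interface in erl_flxctr.c.
+ */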
#define TOPN_NODE(Dtt, Pos) \
(((Pos) < Dtt->pos) ? \
@@ -89,10 +79,12 @@
/* Obtain table static stack if available. NULL if not.
** Must be released with release_stack()
*/
-static DbTreeStack* get_static_stack(DbTableTree* tb)
+ERTS_INLINE static DbTreeStack* get_static_stack(DbTableTree* tb)
{
- if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
- return &tb->static_stack;
+ if (tb != NULL) {
+ ASSERT(IS_TREE_TABLE(tb->common.type));
+ if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1))
+ return &tb->static_stack;
}
return NULL;
}
@@ -100,13 +92,15 @@ static DbTreeStack* get_static_stack(DbTableTree* tb)
/* Obtain static stack if available, otherwise empty dynamic stack.
** Must be released with release_stack()
*/
-static DbTreeStack* get_any_stack(DbTableTree* tb)
+static DbTreeStack* get_any_stack(DbTable* tb, DbTableTree* stack_container)
{
DbTreeStack* stack;
- if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
- return &tb->static_stack;
+ if (stack_container != NULL) {
+ ASSERT(IS_TREE_TABLE(stack_container->common.type));
+ if (!erts_atomic_xchg_acqb(&stack_container->is_stack_busy, 1))
+ return &stack_container->static_stack;
}
- stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb,
+ stack = erts_db_alloc(ERTS_ALC_T_DB_STK, tb,
sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED);
stack->pos = 0;
stack->slot = 0;
@@ -114,62 +108,62 @@ static DbTreeStack* get_any_stack(DbTableTree* tb)
return stack;
}
-static void release_stack(DbTableTree* tb, DbTreeStack* stack)
+static void release_stack(DbTable* tb, DbTableTree* stack_container, DbTreeStack* stack)
{
- if (stack == &tb->static_stack) {
- ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1);
- erts_atomic_set_relb(&tb->is_stack_busy, 0);
- }
- else {
- erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
- (void *) stack, sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED);
+ if (stack_container != NULL) {
+ ASSERT(IS_TREE_TABLE(stack_container->common.type));
+ if (stack == &stack_container->static_stack) {
+ ASSERT(erts_atomic_read_nob(&stack_container->is_stack_busy) == 1);
+ erts_atomic_set_relb(&stack_container->is_stack_busy, 0);
+ return;
+ }
}
+ erts_db_free(ERTS_ALC_T_DB_STK, tb,
+ (void *) stack, sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED);
}
-static ERTS_INLINE void reset_static_stack(DbTableTree* tb)
+static ERTS_INLINE void reset_stack(DbTreeStack* stack)
{
- tb->static_stack.pos = 0;
- tb->static_stack.slot = 0;
+ if (stack != NULL) {
+ stack->pos = 0;
+ stack->slot = 0;
+ }
}
-static ERTS_INLINE void free_term(DbTableTree *tb, TreeDbTerm* p)
+static ERTS_INLINE void reset_static_stack(DbTableTree* tb)
{
- db_free_term((DbTable*)tb, p, offsetof(TreeDbTerm, dbterm));
+ if (tb != NULL) {
+ ASSERT(IS_TREE_TABLE(tb->common.type));
+ reset_stack(&tb->static_stack);
+ }
}
-static ERTS_INLINE TreeDbTerm* new_dbterm(DbTableTree *tb, Eterm obj)
+static ERTS_INLINE TreeDbTerm* new_dbterm(DbTableCommon *tb, Eterm obj)
{
TreeDbTerm* p;
- if (tb->common.compress) {
- p = db_store_term_comp(&tb->common, NULL, offsetof(TreeDbTerm,dbterm), obj);
+ if (tb->compress) {
+ p = db_store_term_comp(tb, NULL, offsetof(TreeDbTerm,dbterm), obj);
}
else {
- p = db_store_term(&tb->common, NULL, offsetof(TreeDbTerm,dbterm), obj);
+ p = db_store_term(tb, NULL, offsetof(TreeDbTerm,dbterm), obj);
}
return p;
}
-static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
+static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableCommon *tb, TreeDbTerm* old,
Eterm obj)
{
TreeDbTerm* p;
ASSERT(old != NULL);
- if (tb->common.compress) {
- p = db_store_term_comp(&tb->common, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
+ if (tb->compress) {
+ p = db_store_term_comp(tb, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
}
else {
- p = db_store_term(&tb->common, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
+ p = db_store_term(tb, &(old->dbterm), offsetof(TreeDbTerm,dbterm), obj);
}
return p;
}
/*
-** Some macros for "direction stacks"
-*/
-#define DIR_LEFT 0
-#define DIR_RIGHT 1
-#define DIR_END 2
-
-/*
* Number of records to delete before trapping.
*/
#define DELETE_RECORD_LIMIT 12000
@@ -208,31 +202,37 @@ static void do_dump_tree2(DbTableTree*, int to, void *to_arg, int show,
** Datatypes
*/
+enum ms_key_boundness {
+ /* Order significant, larger means more "boundness" => less iteration */
+ MS_KEY_UNBOUND = 0,
+ MS_KEY_PARTIALLY_BOUND = 1,
+ MS_KEY_BOUND = 2,
+ MS_KEY_IMPOSSIBLE = 3
+};
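+/* Illustrative examples (keypos 1, not from the original source):
+   the match head {{a,b},'_'} has a BOUND key, {{a,'_'},'_'} a
+   PARTIALLY_BOUND key and {'_','_'} an UNBOUND key; IMPOSSIBLE
+   means no key can match at all. */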
+
/*
* This structure is filled in by analyze_pattern() for the select
* functions.
*/
struct mp_info {
- int something_can_match; /* The match_spec is not "impossible" */
- int some_limitation; /* There is some limitation on the search
- * area, i. e. least and/or most is set.*/
- int got_partial; /* The limitation has a partially bound
- * key */
+ enum ms_key_boundness key_boundness;
Eterm least; /* The lowest matching key (possibly
* partially bound expression) */
Eterm most; /* The highest matching key (possibly
* partially bound expression) */
-
- TreeDbTerm **save_term; /* If the key is completely bound, this
- * will be the Tree node we're searching
- * for, otherwise it will be useless */
Binary *mp; /* The compiled match program */
};
+struct select_common {
+ TreeDbTerm **root;
+};
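+/* Each select context below embeds select_common as its first member,
+   so the traverse_* callbacks can operate on a single common pointer.
+   'root' is the root of the tree currently being traversed; when
+   iterating a catree it is updated as the iterator moves between
+   subtrees. */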
+
+
/*
* Used by doit_select(_chunk)
*/
struct select_context {
+ struct select_common common;
Process *p;
Eterm accum;
Binary *mp;
@@ -248,6 +248,7 @@ struct select_context {
* Used by doit_select_count
*/
struct select_count_context {
+ struct select_common common;
Process *p;
Binary *mp;
Eterm end_condition;
@@ -261,8 +262,10 @@ struct select_count_context {
* Used by doit_select_delete
*/
struct select_delete_context {
+ struct select_common common;
Process *p;
- DbTableTree *tb;
+ DbTableCommon *tb;
+ DbTreeStack *stack;
Uint accum;
Binary *mp;
Eterm end_condition;
@@ -276,8 +279,9 @@ struct select_delete_context {
* Used by doit_select_replace
*/
struct select_replace_context {
+ struct select_common common;
Process *p;
- DbTableTree *tb;
+ DbTableCommon *tb;
Binary *mp;
Eterm end_condition;
Eterm *lastobj;
@@ -292,75 +296,83 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete
/*
** Forward declarations
*/
-static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key);
-static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
- Eterm object);
+static TreeDbTerm *linkout_tree(DbTableCommon *tb, TreeDbTerm **root,
+ Eterm key, DbTreeStack *stack);
+static TreeDbTerm *linkout_object_tree(DbTableCommon *tb, TreeDbTerm **root,
+                                       Eterm object, DbTableTree *stack_container);
static SWord do_free_tree_continue(DbTableTree *tb, SWord reds);
-static void free_term(DbTableTree *tb, TreeDbTerm* p);
-static int balance_left(TreeDbTerm **this);
-static int balance_right(TreeDbTerm **this);
+static void free_term(DbTable *tb, TreeDbTerm* p);
+int tree_balance_left(TreeDbTerm **this);
+int tree_balance_right(TreeDbTerm **this);
static int delsub(TreeDbTerm **this);
-static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot);
-static TreeDbTerm *find_node(DbTableTree *tb, Eterm key);
-static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key);
-static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack*, TreeDbTerm *this);
-static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key);
-static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key);
-static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*,
- Eterm key);
-static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack*,
- Eterm key);
-static void traverse_backwards(DbTableTree *tb,
+static TreeDbTerm *slot_search(Process *p, TreeDbTerm *root, Sint slot,
+ DbTable *tb, DbTableTree *stack_container,
+ CATreeRootIterator *iter, int* is_EOT);
+static TreeDbTerm *find_node(DbTableCommon *tb, TreeDbTerm *root,
+ Eterm key, DbTableTree *stack_container);
+static TreeDbTerm **find_node2(DbTableCommon *tb, TreeDbTerm **root, Eterm key);
+static TreeDbTerm **find_ptr(DbTableCommon *tb, TreeDbTerm **root,
+ DbTreeStack *stack, TreeDbTerm *this);
+static TreeDbTerm *find_next(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key);
+static TreeDbTerm *find_prev(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key);
+static TreeDbTerm *find_next_from_pb_key(DbTable*, TreeDbTerm*** rootpp,
+ DbTreeStack* stack, Eterm key,
+ CATreeRootIterator*);
+static TreeDbTerm *find_prev_from_pb_key(DbTable*, TreeDbTerm*** rootpp,
+ DbTreeStack* stack, Eterm key,
+ CATreeRootIterator*);
+typedef int traverse_doit_funcT(DbTableCommon*, TreeDbTerm*,
+ struct select_common*, int forward);
+
+static void traverse_backwards(DbTableCommon *tb,
DbTreeStack*,
Eterm lastkey,
- int (*doit)(DbTableTree *tb,
- TreeDbTerm *,
- void *,
- int),
- void *context);
-static void traverse_forward(DbTableTree *tb,
+ traverse_doit_funcT*,
+ struct select_common *context,
+ CATreeRootIterator*);
+static void traverse_forward(DbTableCommon *tb,
DbTreeStack*,
Eterm lastkey,
- int (*doit)(DbTableTree *tb,
- TreeDbTerm *,
- void *,
- int),
- void *context);
-static void traverse_update_backwards(DbTableTree *tb,
+ traverse_doit_funcT*,
+ struct select_common *context,
+ CATreeRootIterator*);
+static void traverse_update_backwards(DbTableCommon *tb,
DbTreeStack*,
Eterm lastkey,
- int (*doit)(DbTableTree *tb,
+ int (*doit)(DbTableCommon *tb,
TreeDbTerm **, // out
- void *,
+ struct select_common*,
int),
- void *context);
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
- Eterm *partly_bound_key);
-static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key);
+ struct select_common*,
+ CATreeRootIterator*);
+static enum ms_key_boundness key_boundness(DbTableCommon *tb,
+ Eterm pattern, Eterm *keyp);
static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done);
-static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+static int analyze_pattern(DbTableCommon *tb, Eterm pattern,
extra_match_validator_t extra_validator, /* Optional callback */
struct mp_info *mpi);
-static int doit_select(DbTableTree *tb,
- TreeDbTerm *this,
- void *ptr,
+static int doit_select(DbTableCommon *tb,
+ TreeDbTerm *this,
+ struct select_common* ptr,
int forward);
-static int doit_select_count(DbTableTree *tb,
+static int doit_select_count(DbTableCommon *tb,
TreeDbTerm *this,
- void *ptr,
+ struct select_common*,
int forward);
-static int doit_select_chunk(DbTableTree *tb,
+static int doit_select_chunk(DbTableCommon *tb,
TreeDbTerm *this,
- void *ptr,
+ struct select_common*,
int forward);
-static int doit_select_delete(DbTableTree *tb,
+static int doit_select_delete(DbTableCommon *tb,
TreeDbTerm *this,
- void *ptr,
+ struct select_common*,
int forward);
-static int doit_select_replace(DbTableTree *tb,
+static int doit_select_replace(DbTableCommon *tb,
TreeDbTerm **this_ptr,
- void *ptr,
+ struct select_common*,
int forward);
static int partly_bound_can_match_lesser(Eterm partly_bound_1,
@@ -396,24 +408,31 @@ static int db_erase_object_tree(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_tree(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, int reversed, Eterm *ret);
+ Eterm pattern, int reversed, Eterm *ret,
+ enum DbIterSafety);
static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret);
+ Eterm pattern, Eterm *ret, enum DbIterSafety);
static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
- int reversed, Eterm *ret);
+ int reversed, Eterm *ret, enum DbIterSafety);
static int db_select_continue_tree(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
static int db_select_count_continue_tree(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret);
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety);
static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret);
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety);
static int db_select_replace_continue_tree(Process *p, DbTable *tbl,
- Eterm continuation, Eterm *ret);
+ Eterm continuation, Eterm *ret,
+ enum DbIterSafety*);
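+/* The select entry points now take an enum DbIterSafety describing how
+   safe the iteration is against concurrent modification; continuation
+   variants take a pointer so the safety level can be updated across
+   traps (exact semantics assumed, defined in erl_db_util.h). */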
static int db_take_tree(Process *, DbTable *, Eterm, Eterm *);
static void db_print_tree(fmtfn_t to, void *to_arg,
int show, DbTable *tbl);
@@ -425,8 +444,12 @@ static void db_foreach_offheap_tree(DbTable *,
void (*)(ErlOffHeap *, void *),
void *);
-static SWord db_delete_all_objects_tree(Process* p, DbTable* tbl, SWord reds);
-
+static SWord db_delete_all_objects_tree(Process* p,
+ DbTable* tbl,
+ SWord reds,
+ Eterm* nitems_holder_wb);
+static Eterm db_delete_all_objects_get_nitems_from_holder_tree(Process* p,
+ Eterm nitems_holder);
#ifdef HARDDEBUG
static void db_check_table_tree(DbTable *tbl);
#endif
@@ -470,6 +493,7 @@ DbTableMethod db_tree =
db_select_replace_continue_tree,
db_take_tree,
db_delete_all_objects_tree,
+ db_delete_all_objects_get_nitems_from_holder_tree,
db_free_empty_table_tree,
db_free_table_continue_tree,
db_print_tree,
@@ -508,18 +532,18 @@ int db_create_tree(Process *p, DbTable *tbl)
return DB_ERROR_NONE;
}
-static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
+int db_first_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container)
{
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
TreeDbTerm *this;
- if (( this = tb->root ) == NULL) {
+ if (( this = root ) == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
}
/* Walk down the tree to the left */
- if ((stack = get_static_stack(tb)) != NULL) {
+ if ((stack = get_static_stack(stack_container)) != NULL) {
stack->pos = stack->slot = 0;
}
while (this->left != NULL) {
@@ -529,23 +553,27 @@ static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
if (stack) {
PUSH_NODE(stack, this);
stack->slot = 1;
- release_stack(tb,stack);
+ release_stack(tbl,stack_container,stack);
}
*ret = db_copy_key(p, tbl, &this->dbterm);
return DB_ERROR_NONE;
}
-static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
- DbTreeStack* stack;
+ return db_first_tree_common(p, tbl, tb->root, ret, tb);
+}
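+/* db_first_tree() is now a thin wrapper: the body lives in
+   db_first_tree_common(), which takes the root and stack container
+   explicitly so it can be shared with the catree implementation in
+   erl_db_catree.c. The remaining db_*_tree callbacks follow the same
+   pattern. */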
+
+int db_next_tree_common(Process *p, DbTable *tbl,
+ TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack)
+{
TreeDbTerm *this;
- if (is_atom(key) && key == am_EOT)
+ if (key == am_EOT)
return DB_ERROR_BADKEY;
- stack = get_any_stack(tb);
- this = find_next(tb, stack, key);
- release_stack(tb,stack);
+ this = find_next(&tbl->common, root, stack, key);
if (this == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
@@ -554,18 +582,27 @@ static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
-static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
+static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack = get_any_stack(tbl, tb);
+ int ret_val = db_next_tree_common(p, tbl, tb->root, key, ret, stack);
+ release_stack(tbl,tb,stack);
+ return ret_val;
+}
+
+int db_last_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container)
+{
TreeDbTerm *this;
DbTreeStack* stack;
- if (( this = tb->root ) == NULL) {
+ if (( this = root ) == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
}
/* Walk down the tree to the right */
- if ((stack = get_static_stack(tb)) != NULL) {
+ if ((stack = get_static_stack(stack_container)) != NULL) {
stack->pos = stack->slot = 0;
}
while (this->right != NULL) {
@@ -574,24 +611,28 @@ static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
}
if (stack) {
PUSH_NODE(stack, this);
- stack->slot = NITEMS(tb);
- release_stack(tb,stack);
+ /* Always centralized counters when static stack is used */
+ stack->slot = NITEMS_CENTRALIZED(tbl);
+ release_stack(tbl,stack_container,stack);
}
*ret = db_copy_key(p, tbl, &this->dbterm);
return DB_ERROR_NONE;
}
-static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_last_tree_common(p, tbl, tb->root, ret, tb);
+}
+
+int db_prev_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack)
+{
TreeDbTerm *this;
- DbTreeStack* stack;
- if (is_atom(key) && key == am_EOT)
+ if (key == am_EOT)
return DB_ERROR_BADKEY;
- stack = get_any_stack(tb);
- this = find_prev(tb, stack, key);
- release_stack(tb,stack);
+ this = find_prev(&tbl->common, root, stack, key);
if (this == NULL) {
*ret = am_EOT;
return DB_ERROR_NONE;
@@ -600,25 +641,30 @@ static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
-static ERTS_INLINE Sint cmp_key(DbTableTree* tb, Eterm key, TreeDbTerm* obj) {
- return CMP(key, GETKEY(tb,obj->dbterm.tpl));
+static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack = get_any_stack(tbl, tb);
+ int res = db_prev_tree_common(p, tbl, tb->root, key, ret, stack);
+ release_stack(tbl,tb,stack);
+ return res;
}
-static ERTS_INLINE int cmp_key_eq(DbTableTree* tb, Eterm key, TreeDbTerm* obj) {
+static ERTS_INLINE int cmp_key_eq(DbTableCommon* tb, Eterm key, TreeDbTerm* obj) {
Eterm obj_key = GETKEY(tb,obj->dbterm.tpl);
return is_same(key, obj_key) || CMP(key, obj_key) == 0;
}
-static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
+int db_put_tree_common(DbTableCommon *tb, TreeDbTerm **root, Eterm obj,
+ int key_clash_fail, DbTableTree *stack_container)
{
- DbTableTree *tb = &tbl->tree;
/* Non recursive insertion in AVL tree, building our own stack */
TreeDbTerm **tstack[STACK_NEED];
int tpos = 0;
int dstack[STACK_NEED+1];
int dpos = 0;
int state = 0;
- TreeDbTerm **this = &tb->root;
+ TreeDbTerm **this = root;
Sint c;
Eterm key;
int dir;
@@ -626,16 +672,13 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
key = GETKEY(tb, tuple_val(obj));
- reset_static_stack(tb);
+ reset_static_stack(stack_container);
dstack[dpos++] = DIR_END;
for (;;)
if (!*this) { /* Found our place */
state = 1;
- if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
- erts_atomic_dec_nob(&tb->common.nitems);
- return DB_ERROR_SYSRES;
- }
+ INC_NITEMS(((DbTable*)tb));
*this = new_dbterm(tb, obj);
(*this)->balance = 0;
(*this)->left = (*this)->right = NULL;
@@ -724,9 +767,15 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
return DB_ERROR_NONE;
}
-static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
{
DbTableTree *tb = &tbl->tree;
+ return db_put_tree_common(&tb->common, &tb->root, obj, key_clash_fail, tb);
+}
+
+int db_get_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTableTree *stack_container)
+{
Eterm copy;
Eterm *hp, *hend;
TreeDbTerm *this;
@@ -737,13 +786,13 @@ static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
* The list created around it is purely for interface conformance.
*/
- this = find_node(tb,key);
+ this = find_node(tb,root,key,stack_container);
if (this == NULL) {
*ret = NIL;
} else {
hp = HAlloc(p, this->dbterm.size + 2);
hend = hp + this->dbterm.size + 2;
- copy = db_copy_object_from_ets(&tb->common, &this->dbterm, &hp, &MSO(p));
+ copy = db_copy_object_from_ets(tb, &this->dbterm, &hp, &MSO(p));
*ret = CONS(hp, copy, NIL);
hp += 2;
HRelease(p,hend,hp);
@@ -751,18 +800,28 @@ static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
return DB_ERROR_NONE;
}
-static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret)
+static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_get_tree_common(p, &tb->common, tb->root, key, ret, tb);
+}
- *ret = (find_node(tb,key) == NULL) ? am_false : am_true;
+int db_member_tree_common(DbTableCommon *tb, TreeDbTerm *root, Eterm key, Eterm *ret,
+ DbTableTree *stack_container)
+{
+ *ret = (find_node(tb,root,key,stack_container) == NULL) ? am_false : am_true;
return DB_ERROR_NONE;
}
-static int db_get_element_tree(Process *p, DbTable *tbl,
- Eterm key, int ndex, Eterm *ret)
+static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_member_tree_common(&tb->common, tb->root, key, ret, tb);
+}
+
+int db_get_element_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ int ndex, Eterm *ret, DbTableTree *stack_container)
+{
/*
* Look the node up:
*/
@@ -776,54 +835,74 @@ static int db_get_element_tree(Process *p, DbTable *tbl,
* around the element here either.
*/
- this = find_node(tb,key);
+ this = find_node(tb,root,key,stack_container);
if (this == NULL) {
return DB_ERROR_BADKEY;
} else {
if (ndex > arityval(this->dbterm.tpl[0])) {
return DB_ERROR_BADPARAM;
}
- *ret = db_copy_element_from_ets(&tb->common, p, &this->dbterm, ndex, &hp, 0);
+ *ret = db_copy_element_from_ets(tb, p, &this->dbterm, ndex, &hp, 0);
}
return DB_ERROR_NONE;
}
-static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret)
+static int db_get_element_tree(Process *p, DbTable *tbl,
+ Eterm key, int ndex, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_get_element_tree_common(p, &tb->common, tb->root, key,
+ ndex, ret, tb);
+}
+
+int db_erase_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */)
+{
TreeDbTerm *res;
*ret = am_true;
- if ((res = linkout_tree(tb, key)) != NULL) {
- free_term(tb, res);
+ if ((res = linkout_tree(&tbl->common, root,key, stack)) != NULL) {
+ free_term(tbl, res);
}
return DB_ERROR_NONE;
}
-static int db_erase_object_tree(DbTable *tbl, Eterm object, Eterm *ret)
+static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_erase_tree_common(tbl, &tb->root, key, ret, &tb->static_stack);
+}
+
+int db_erase_object_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm object,
+ Eterm *ret, DbTableTree *stack_container)
+{
TreeDbTerm *res;
*ret = am_true;
- if ((res = linkout_object_tree(tb, object)) != NULL) {
- free_term(tb, res);
+ if ((res = linkout_object_tree(&tbl->common, root, object, stack_container)) != NULL) {
+ free_term(tbl, res);
}
return DB_ERROR_NONE;
}
-
-static int db_slot_tree(Process *p, DbTable *tbl,
- Eterm slot_term, Eterm *ret)
+static int db_erase_object_tree(DbTable *tbl, Eterm object, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
+ return db_erase_object_tree_common(tbl, &tb->root, object, ret, tb);
+}
+
+int db_slot_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm slot_term, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator *iter)
+{
Sint slot;
TreeDbTerm *st;
Eterm *hp, *hend;
Eterm copy;
-
+ int is_EOT = 0;
/*
* The notion of a "slot" is not natural in a tree, but we try to
* simulate it by giving the n'th node in the tree instead.
@@ -834,10 +913,10 @@ static int db_slot_tree(Process *p, DbTable *tbl,
if (is_not_small(slot_term) ||
((slot = signed_val(slot_term)) < 0) ||
- (slot > NITEMS(tb)))
+ (IS_CENTRALIZED_CTR(tbl) && slot > NITEMS_CENTRALIZED(tbl)))
return DB_ERROR_BADPARAM;
- if (slot == NITEMS(tb)) {
+ if (IS_CENTRALIZED_CTR(tbl) && slot == NITEMS_CENTRALIZED(tbl)) {
*ret = am_EOT;
return DB_ERROR_NONE;
}
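+    /* An exact item count is only available when the counter is
+       centralized; with a decentralized counter an out-of-range slot
+       is instead detected during the search and reported through
+       is_EOT (as assumed from the slot_search() changes below). */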
@@ -847,20 +926,31 @@ static int db_slot_tree(Process *p, DbTable *tbl,
* are counted from 1 and up.
*/
++slot;
- st = slot_search(p, tb, slot);
+ st = slot_search(p, root, slot, tbl, stack_container, iter, &is_EOT);
+ if (is_EOT) {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
if (st == NULL) {
*ret = am_false;
return DB_ERROR_UNSPEC;
}
hp = HAlloc(p, st->dbterm.size + 2);
hend = hp + st->dbterm.size + 2;
- copy = db_copy_object_from_ets(&tb->common, &st->dbterm, &hp, &MSO(p));
+ copy = db_copy_object_from_ets(&tbl->common, &st->dbterm, &hp, &MSO(p));
*ret = CONS(hp, copy, NIL);
hp += 2;
HRelease(p,hend,hp);
return DB_ERROR_NONE;
}
+static int db_slot_tree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ return db_slot_tree_common(p, tbl, tb->root, slot_term, ret, tb, NULL);
+}
+
static BIF_RETTYPE ets_select_reverse(BIF_ALIST_3)
@@ -926,19 +1016,14 @@ static BIF_RETTYPE bif_trap3(Export *bif,
{
BIF_TRAP3(bif, p, p1, p2, p3);
}
-
-/*
-** This is called either when the select bif traps or when ets:select/1
-** is called. It does mostly the same as db_select_tree and may in either case
-** trap to itself again (via the ets:select/1 bif).
-** Note that this is common for db_select_tree and db_select_chunk_tree.
-*/
-static int db_select_continue_tree(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+
+int db_select_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter)
{
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_context sc;
unsigned sz;
@@ -951,7 +1036,6 @@ static int db_select_continue_tree(Process *p,
Sint chunk_size;
Sint reverse;
-
#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0);
/* Decode continuation. We know it's a tuple but not the arity or
@@ -980,28 +1064,37 @@ static int db_select_continue_tree(Process *p,
sc.end_condition = NIL;
sc.lastobj = NULL;
sc.max = 1000;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tb->keypos;
sc.chunk_size = chunk_size;
reverse = unsigned_val(tptr[7]);
sc.got = signed_val(tptr[8]);
- stack = get_any_stack(tb);
- if (chunk_size) {
- if (reverse) {
- traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
- } else {
- traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
- }
- } else {
- if (reverse) {
- traverse_forward(tb, stack, lastkey, &doit_select, &sc);
- } else {
- traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
- }
+ if (iter) {
+ iter->next_route_key = lastkey;
+ sc.common.root = catree_find_nextprev_root(iter, !!reverse != !!chunk_size, NULL);
}
- release_stack(tb,stack);
+ else
+ sc.common.root = &((DbTableTree*)tb)->root;
+
+ if (sc.common.root) {
+ stack = get_any_stack((DbTable*)tb, stack_container);
+ if (chunk_size) {
+ if (reverse) {
+ traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc.common, iter);
+ } else {
+ traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc.common, iter);
+ }
+ } else {
+ if (reverse) {
+ traverse_forward(tb, stack, lastkey, &doit_select, &sc.common, iter);
+ } else {
+ traverse_backwards(tb, stack, lastkey, &doit_select, &sc.common, iter);
+ }
+ }
+ release_stack((DbTable*)tb,stack_container,stack);
- BUMP_REDS(p, 1000 - sc.max);
+ BUMP_REDS(p, 1000 - sc.max);
+ }
if (sc.max > 0 || (chunk_size && sc.got == chunk_size)) {
if (chunk_size) {
@@ -1082,13 +1175,30 @@ static int db_select_continue_tree(Process *p,
#undef RET_TO_BIF
}
+
+/*
+** This is called either when the select bif traps or when ets:select/1
+** is called. It does mostly the same as db_select_tree and may in either case
+** trap to itself again (via the ets:select/1 bif).
+** Note that this is common for db_select_tree and db_select_chunk_tree.
+*/
+static int db_select_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ enum DbIterSafety* safety_p)
+{
+ DbTableTree *tb = &tbl->tree;
+ return db_select_continue_tree_common(p, &tb->common,
+ continuation, ret, tb, NULL);
+}
-
-static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, int reverse, Eterm *ret)
+int db_select_tree_common(Process *p, DbTable *tb,
+ Eterm tid, Eterm pattern, int reverse, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter)
{
/* Strategy: Traverse backwards to build resulting list from tail to head */
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
@@ -1121,42 +1231,62 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
sc.got = 0;
sc.chunk_size = 0;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(&tb->common, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
- if (!mpi.something_can_match) {
+ if (mpi.key_boundness == MS_KEY_IMPOSSIBLE) {
RET_TO_BIF(NIL,DB_ERROR_NONE);
/* can't possibly match anything */
}
sc.mp = mpi.mp;
- if (!mpi.got_partial && mpi.some_limitation &&
- CMP_EQ(mpi.least,mpi.most)) {
- doit_select(tb,*(mpi.save_term),&sc,0 /* direction doesn't matter */);
+ if (mpi.key_boundness == MS_KEY_BOUND) {
+ ASSERT(CMP_EQ(mpi.least, mpi.most));
+ if (iter)
+ sc.common.root = catree_find_root(mpi.least, iter);
+ else
+ sc.common.root = &tb->tree.root;
+ this = find_node(&tb->common, *sc.common.root, mpi.least, NULL);
+ if (this)
+ doit_select(&tb->common, this, &sc.common, 0 /* direction doesn't matter */);
RET_TO_BIF(sc.accum,DB_ERROR_NONE);
}
- stack = get_any_stack(tb);
+ stack = get_any_stack((DbTable*)tb,stack_container);
if (reverse) {
- if (mpi.some_limitation) {
- if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
+ if (mpi.key_boundness == MS_KEY_PARTIALLY_BOUND) {
+ this = find_prev_from_pb_key(tb, &sc.common.root, stack, mpi.least, iter);
+ if (this)
lastkey = GETKEY(tb, this->dbterm.tpl);
- }
sc.end_condition = mpi.most;
}
- traverse_forward(tb, stack, lastkey, &doit_select, &sc);
+ else {
+ ASSERT(mpi.key_boundness == MS_KEY_UNBOUND);
+ if (iter)
+ sc.common.root = catree_find_first_root(iter);
+ else
+ sc.common.root = &tb->tree.root;
+ }
+ traverse_forward(&tb->common, stack, lastkey, &doit_select, &sc.common, iter);
} else {
- if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
- lastkey = GETKEY(tb, this->dbterm.tpl);
- }
+ if (mpi.key_boundness == MS_KEY_PARTIALLY_BOUND) {
+ this = find_next_from_pb_key(tb, &sc.common.root, stack, mpi.most, iter);
+ if (this)
+ lastkey = GETKEY(tb, this->dbterm.tpl);
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
+ else {
+ ASSERT(mpi.key_boundness == MS_KEY_UNBOUND);
+ if (iter)
+ sc.common.root = catree_find_last_root(iter);
+ else
+ sc.common.root = &tb->tree.root;
+ }
+ traverse_backwards(&tb->common, stack, lastkey, &doit_select, &sc.common, iter);
}
- release_stack(tb,stack);
+ release_stack((DbTable*)tb,stack_container,stack);
#ifdef HARDDEBUG
erts_fprintf(stderr,"Least: %T\n", mpi.least);
erts_fprintf(stderr,"Most: %T\n", mpi.most);
@@ -1192,16 +1322,21 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
}
-
-/*
-** This is called either when the select_count bif traps.
-*/
-static int db_select_count_continue_tree(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, int reverse, Eterm *ret,
+ enum DbIterSafety safety)
+{
+ return db_select_tree_common(p, tbl, tid,
+ pattern, reverse, ret, &tbl->tree, NULL);
+}
+
+int db_select_count_continue_tree_common(Process *p,
+ DbTable *tb,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter)
{
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_count_context sc;
unsigned sz;
@@ -1213,7 +1348,6 @@ static int db_select_count_continue_tree(Process *p,
Eterm *tptr;
Eterm egot;
-
#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0);
/* Decode continuation. We know it's a tuple and everything else as
@@ -1245,11 +1379,21 @@ static int db_select_count_continue_tree(Process *p,
sc.got = unsigned_val(tptr[5]);
}
- stack = get_any_stack(tb);
- traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
- release_stack(tb,stack);
+ if (iter) {
+ iter->next_route_key = lastkey;
+ sc.common.root = catree_find_prev_root(iter, NULL);
+ }
+ else {
+ sc.common.root = &tb->tree.root;
+ }
- BUMP_REDS(p, 1000 - sc.max);
+ if (sc.common.root) {
+ stack = get_any_stack(tb, stack_container);
+ traverse_backwards(&tb->common, stack, lastkey, &doit_select_count, &sc.common, iter);
+ release_stack(tb,stack_container,stack);
+
+ BUMP_REDS(p, 1000 - sc.max);
+ }
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.got,p), DB_ERROR_NONE);
@@ -1285,11 +1429,26 @@ static int db_select_count_continue_tree(Process *p,
#undef RET_TO_BIF
}
-
-static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+/*
+** This is called when the select_count bif traps.
+*/
+static int db_select_count_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ enum DbIterSafety* safety_p)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_count_continue_tree_common(p, tbl,
+ continuation, ret, tb, NULL);
+}
+
+
+int db_select_count_tree_common(Process *p, DbTable *tb,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter)
+{
DbTreeStack* stack;
struct select_count_context sc;
struct mp_info mpi;
@@ -1303,7 +1462,6 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm egot;
Eterm mpb;
-
#define RET_TO_BIF(Term,RetVal) do { \
if (mpi.mp != NULL) { \
erts_bin_free(mpi.mp); \
@@ -1321,33 +1479,46 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
sc.keypos = tb->common.keypos;
sc.got = 0;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(&tb->common, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
- if (!mpi.something_can_match) {
+ if (mpi.key_boundness == MS_KEY_IMPOSSIBLE) {
RET_TO_BIF(make_small(0),DB_ERROR_NONE);
/* can't possibly match anything */
}
sc.mp = mpi.mp;
- if (!mpi.got_partial && mpi.some_limitation &&
- CMP_EQ(mpi.least,mpi.most)) {
- doit_select_count(tb,*(mpi.save_term),&sc,0 /* dummy */);
+ if (mpi.key_boundness == MS_KEY_BOUND) {
+ ASSERT(CMP_EQ(mpi.least, mpi.most));
+ if (iter)
+ sc.common.root = catree_find_root(mpi.least, iter);
+ else
+ sc.common.root = &((DbTable*)tb)->tree.root;
+ this = find_node(&tb->common, *sc.common.root, mpi.least, NULL);
+ if (this)
+ doit_select_count(&tb->common, this, &sc.common, 0 /* dummy */);
RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
}
- stack = get_any_stack(tb);
- if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
- lastkey = GETKEY(tb, this->dbterm.tpl);
- }
+ stack = get_any_stack((DbTable*)tb, stack_container);
+ if (mpi.key_boundness == MS_KEY_PARTIALLY_BOUND) {
+ this = find_next_from_pb_key(tb, &sc.common.root, stack, mpi.most, iter);
+ if (this)
+ lastkey = GETKEY(tb, this->dbterm.tpl);
sc.end_condition = mpi.least;
}
+ else {
+ ASSERT(mpi.key_boundness == MS_KEY_UNBOUND);
+ if (iter)
+ sc.common.root = catree_find_last_root(iter);
+ else
+ sc.common.root = &tb->tree.root;
+ }
- traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
- release_stack(tb,stack);
+ traverse_backwards(&tb->common, stack, lastkey, &doit_select_count, &sc.common, iter);
+ release_stack((DbTable*)tb,stack_container,stack);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
@@ -1383,12 +1554,22 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
}
-static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Sint chunk_size,
- int reverse,
- Eterm *ret)
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_count_tree_common(p, tbl,
+ tid, pattern, ret, tb, NULL);
+}
+
+
+int db_select_chunk_tree_common(Process *p, DbTable *tb,
+ Eterm tid, Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter)
+{
DbTreeStack* stack;
struct select_context sc;
struct mp_info mpi;
@@ -1401,7 +1582,6 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
int errcode;
Eterm mpb;
-
#define RET_TO_BIF(Term,RetVal) do { \
if (mpi.mp != NULL) { \
erts_bin_free(mpi.mp); \
@@ -1421,20 +1601,26 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
sc.got = 0;
sc.chunk_size = chunk_size;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(&tb->common, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
- if (!mpi.something_can_match) {
+ if (mpi.key_boundness == MS_KEY_IMPOSSIBLE) {
RET_TO_BIF(am_EOT,DB_ERROR_NONE);
/* can't possibly match anything */
}
sc.mp = mpi.mp;
- if (!mpi.got_partial && mpi.some_limitation &&
- CMP_EQ(mpi.least,mpi.most)) {
- doit_select(tb,*(mpi.save_term),&sc, 0 /* direction doesn't matter */);
+ if (mpi.key_boundness == MS_KEY_BOUND) {
+ ASSERT(CMP_EQ(mpi.least, mpi.most));
+ if (iter)
+ sc.common.root = catree_find_root(mpi.least, iter);
+ else
+ sc.common.root = &tb->tree.root;
+ this = find_node(&tb->common, *sc.common.root, mpi.least, NULL);
+ if (this)
+ doit_select(&tb->common, this, &sc.common, 0 /* direction doesn't matter */);
if (sc.accum != NIL) {
hp=HAlloc(p, 3);
RET_TO_BIF(TUPLE2(hp,sc.accum,am_EOT),DB_ERROR_NONE);
@@ -1443,25 +1629,39 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
}
}
- stack = get_any_stack(tb);
+ stack = get_any_stack((DbTable*)tb,stack_container);
if (reverse) {
- if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
- lastkey = GETKEY(tb, this->dbterm.tpl);
- }
+ if (mpi.key_boundness == MS_KEY_PARTIALLY_BOUND) {
+ this = find_next_from_pb_key(tb, &sc.common.root, stack, mpi.most, iter);
+ if (this)
+ lastkey = GETKEY(tb, this->dbterm.tpl);
sc.end_condition = mpi.least;
}
- traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
+ else {
+ ASSERT(mpi.key_boundness == MS_KEY_UNBOUND);
+ if (iter)
+ sc.common.root = catree_find_last_root(iter);
+ else
+ sc.common.root = &tb->tree.root;
+ }
+ traverse_backwards(&tb->common, stack, lastkey, &doit_select_chunk, &sc.common, iter);
} else {
- if (mpi.some_limitation) {
- if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
- lastkey = GETKEY(tb, this->dbterm.tpl);
- }
+ if (mpi.key_boundness == MS_KEY_PARTIALLY_BOUND) {
+ this = find_prev_from_pb_key(tb, &sc.common.root, stack, mpi.least, iter);
+ if (this)
+ lastkey = GETKEY(tb, this->dbterm.tpl);
sc.end_condition = mpi.most;
}
- traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
+ else {
+ ASSERT(mpi.key_boundness == MS_KEY_UNBOUND);
+ if (iter)
+ sc.common.root = catree_find_first_root(iter);
+ else
+ sc.common.root = &tb->tree.root;
+ }
+ traverse_forward(&tb->common, stack, lastkey, &doit_select_chunk, &sc.common, iter);
}
- release_stack(tb,stack);
+ release_stack((DbTable*)tb,stack_container,stack);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0 || sc.got == chunk_size) {
@@ -1530,15 +1730,25 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
}
-/*
-** This is called when select_delete traps
-*/
-static int db_select_delete_continue_tree(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
+ int reverse,
+ Eterm *ret, enum DbIterSafety safety)
{
DbTableTree *tb = &tbl->tree;
+ return db_select_chunk_tree_common(p, tbl,
+ tid, pattern, chunk_size,
+ reverse, ret, tb, NULL);
+}
+
+
+int db_select_delete_continue_tree_common(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ DbTreeStack* stack,
+ CATreeRootIterator* iter)
+{
struct select_delete_context sc;
unsigned sz;
Eterm *hp;
@@ -1549,10 +1759,9 @@ static int db_select_delete_continue_tree(Process *p,
Eterm *tptr;
Eterm eaccsum;
-
#define RET_TO_BIF(Term, State) do { \
if (sc.erase_lastterm) { \
- free_term(tb, sc.lastterm); \
+ free_term(tbl, sc.lastterm); \
} \
*ret = (Term); \
return State; \
@@ -1571,7 +1780,8 @@ static int db_select_delete_continue_tree(Process *p,
mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
sc.p = p;
- sc.tb = tb;
+ sc.tb = &tbl->common;
+ sc.stack = stack;
if (is_big(tptr[5])) {
sc.accum = big_to_uint32(tptr[5]);
} else {
@@ -1580,17 +1790,26 @@ static int db_select_delete_continue_tree(Process *p,
sc.mp = mp;
sc.end_condition = NIL;
sc.max = 1000;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tbl->common.keypos;
- ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy));
- traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
+ if (iter) {
+ iter->next_route_key = lastkey;
+ sc.common.root = catree_find_prev_root(iter, NULL);
+ }
+ else {
+ sc.common.root = &tbl->tree.root;
+ }
- BUMP_REDS(p, 1000 - sc.max);
+ if (sc.common.root) {
+ traverse_backwards(&tbl->common, stack, lastkey, &doit_select_delete, &sc.common, iter);
+
+ BUMP_REDS(p, 1000 - sc.max);
+ }
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.accum, p), DB_ERROR_NONE);
}
- key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
+ key = GETKEY(&tbl->common, (sc.lastterm)->dbterm.tpl);
if (end_condition != NIL &&
cmp_partly_bound(end_condition,key) > 0) { /* done anyway */
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
@@ -1620,10 +1839,24 @@ static int db_select_delete_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+static int db_select_delete_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ enum DbIterSafety* safety_p)
{
DbTableTree *tb = &tbl->tree;
+ ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy));
+ return db_select_delete_continue_tree_common(p, tbl, continuation, ret,
+ &tb->static_stack, NULL);
+}
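+/* The static stack is handed straight to the common routine; the
+   assert above relies on select_delete executing with exclusive
+   access to the table, so the stack cannot be busy here. */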
+
+int db_select_delete_tree_common(Process *p, DbTable *tbl,
+ Eterm tid, Eterm pattern,
+ Eterm *ret,
+ DbTreeStack* stack,
+ CATreeRootIterator* iter)
+{
struct select_delete_context sc;
struct mp_info mpi;
Eterm lastkey = THE_NON_VALUE;
@@ -1641,7 +1874,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
erts_bin_free(mpi.mp); \
} \
if (sc.erase_lastterm) { \
- free_term(tb, sc.lastterm); \
+ free_term(tbl, sc.lastterm); \
} \
*ret = (Term); \
return RetVal; \
@@ -1655,42 +1888,57 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
sc.p = p;
sc.max = 1000;
sc.end_condition = NIL;
- sc.keypos = tb->common.keypos;
- sc.tb = tb;
+ sc.keypos = tbl->common.keypos;
+ sc.tb = &tbl->common;
+ sc.stack = stack;
- if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(&tbl->common, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(0,errcode);
}
- if (!mpi.something_can_match) {
+ if (mpi.key_boundness == MS_KEY_IMPOSSIBLE) {
RET_TO_BIF(make_small(0),DB_ERROR_NONE);
/* can't possibly match anything */
}
sc.mp = mpi.mp;
- if (!mpi.got_partial && mpi.some_limitation &&
- CMP_EQ(mpi.least,mpi.most)) {
- doit_select_delete(tb,*(mpi.save_term),&sc, 0 /* direction doesn't
+ if (mpi.key_boundness == MS_KEY_BOUND) {
+ ASSERT(CMP_EQ(mpi.least, mpi.most));
+ if (iter)
+ sc.common.root = catree_find_root(mpi.least, iter);
+ else
+ sc.common.root = &tbl->tree.root;
+ this = find_node(&tbl->common, *sc.common.root, mpi.least, NULL);
+ if (this)
+ doit_select_delete(&tbl->common, this, &sc.common, 0 /* direction doesn't
matter */);
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
}
- if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, &tb->static_stack, mpi.most)) != NULL) {
- lastkey = GETKEY(tb, this->dbterm.tpl);
- }
+ if (mpi.key_boundness == MS_KEY_PARTIALLY_BOUND) {
+ this = find_next_from_pb_key(tbl, &sc.common.root, stack, mpi.most, iter);
+ if (this)
+ lastkey = GETKEY(&tbl->common, this->dbterm.tpl);
sc.end_condition = mpi.least;
}
+ else {
+ ASSERT(mpi.key_boundness == MS_KEY_UNBOUND);
+ if (iter)
+ sc.common.root = catree_find_last_root(iter);
+ else
+ sc.common.root = &tbl->tree.root;
+ }
- traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
+ traverse_backwards(&tbl->common, stack, lastkey,
+ &doit_select_delete, &sc.common, iter);
BUMP_REDS(p, 1000 - sc.max);
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.accum,p), DB_ERROR_NONE);
}
- key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
+ key = GETKEY(&tbl->common, (sc.lastterm)->dbterm.tpl);
sz = size_object(key);
if (IS_USMALL(0, sc.accum)) {
hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
@@ -1714,7 +1962,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
/* Don't free mpi.mp, so don't use macro */
if (sc.erase_lastterm) {
- free_term(tb, sc.lastterm);
+ free_term(tbl, sc.lastterm);
}
*ret = bif_trap1(&ets_select_delete_continue_exp, p, continuation);
return DB_ERROR_NONE;
@@ -1723,12 +1971,22 @@ static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
}
-static int db_select_replace_continue_tree(Process *p,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
+{
+ DbTableTree *tb = &tbl->tree;
+ return db_select_delete_tree_common(p, tbl, tid, pattern, ret,
+ &tb->static_stack, NULL);
+}
+
+int db_select_replace_continue_tree_common(Process *p,
DbTable *tbl,
Eterm continuation,
- Eterm *ret)
+ Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter)
{
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_replace_context sc;
unsigned sz;
@@ -1764,7 +2022,7 @@ static int db_select_replace_continue_tree(Process *p,
sc.end_condition = NIL;
sc.lastobj = NULL;
sc.max = 1000;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tbl->common.keypos;
if (is_big(tptr[5])) {
sc.replaced = big_to_uint32(tptr[5]);
} else {
@@ -1772,9 +2030,18 @@ static int db_select_replace_continue_tree(Process *p,
}
prev_replaced = sc.replaced;
- stack = get_any_stack(tb);
- traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
- release_stack(tb,stack);
+ if (iter) {
+ iter->next_route_key = lastkey;
+ sc.common.root = catree_find_prev_root(iter, NULL);
+ }
+ else {
+ sc.common.root = &tbl->tree.root;
+ }
+
+ stack = get_any_stack(tbl, stack_container);
+ traverse_update_backwards(&tbl->common, stack, lastkey, &doit_select_replace,
+ &sc.common, iter);
+ release_stack(tbl, stack_container,stack);
// the more objects we've replaced, the more reductions we've consumed
BUMP_REDS(p, MIN(2000, (1000 - sc.max) + (sc.replaced - prev_replaced)));
@@ -1782,7 +2049,7 @@ static int db_select_replace_continue_tree(Process *p,
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.replaced,p), DB_ERROR_NONE);
}
- key = GETKEY(tb, sc.lastobj);
+ key = GETKEY(tbl, sc.lastobj);
if (end_condition != NIL &&
(cmp_partly_bound(end_condition,key) > 0)) {
/* done anyway */
@@ -1813,10 +2080,21 @@ static int db_select_replace_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
- Eterm pattern, Eterm *ret)
+static int db_select_replace_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ enum DbIterSafety* safety_p)
+{
+ return db_select_replace_continue_tree_common(p, tbl, continuation, ret,
+ &tbl->tree, NULL);
+}
+
+int db_select_replace_tree_common(Process *p, DbTable *tbl,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter)
{
- DbTableTree *tb = &tbl->tree;
DbTreeStack* stack;
struct select_replace_context sc;
struct mp_info mpi;
@@ -1843,48 +2121,64 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
sc.lastobj = NULL;
sc.p = p;
- sc.tb = tb;
+ sc.tb = &tbl->common;
sc.max = 1000;
sc.end_condition = NIL;
- sc.keypos = tb->common.keypos;
+ sc.keypos = tbl->common.keypos;
sc.replaced = 0;
- if ((errcode = analyze_pattern(tb, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(&tbl->common, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
- if (!mpi.something_can_match) {
+ if (mpi.key_boundness == MS_KEY_IMPOSSIBLE) {
RET_TO_BIF(make_small(0),DB_ERROR_NONE);
/* can't possibly match anything */
}
sc.mp = mpi.mp;
- if (!mpi.got_partial && mpi.some_limitation &&
- CMP_EQ(mpi.least,mpi.most)) {
- doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */);
- reset_static_stack(tb); /* may refer replaced term */
+ if (mpi.key_boundness == MS_KEY_BOUND) {
+ TreeDbTerm** pp;
+ ASSERT(CMP_EQ(mpi.least, mpi.most));
+ if (iter)
+ sc.common.root = catree_find_root(mpi.least, iter);
+ else
+ sc.common.root = &tbl->tree.root;
+ pp = find_node2(&tbl->common, sc.common.root, mpi.least);
+ if (pp) {
+ doit_select_replace(&tbl->common, pp, &sc.common, 0 /* dummy */);
+ reset_static_stack(stack_container); /* may refer replaced term */
+ }
RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
}
- stack = get_any_stack(tb);
+ stack = get_any_stack(tbl,stack_container);
- if (mpi.some_limitation) {
- if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
- lastkey = GETKEY(tb, this->dbterm.tpl);
- }
+ if (mpi.key_boundness == MS_KEY_PARTIALLY_BOUND) {
+ this = find_next_from_pb_key(tbl, &sc.common.root, stack, mpi.most, iter);
+ if (this)
+ lastkey = GETKEY(tbl, this->dbterm.tpl);
sc.end_condition = mpi.least;
}
+ else {
+ ASSERT(mpi.key_boundness == MS_KEY_UNBOUND);
+ if (iter)
+ sc.common.root = catree_find_last_root(iter);
+ else
+ sc.common.root = &tbl->tree.root;
+ }
- traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
- release_stack(tb,stack);
+ traverse_update_backwards(&tbl->common, stack, lastkey, &doit_select_replace,
+ &sc.common, iter);
+ release_stack(tbl,stack_container,stack);
// the more objects we've replaced, the more reductions we've consumed
BUMP_REDS(p, MIN(2000, (1000 - sc.max) + sc.replaced));
if (sc.max > 0) {
RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
}
- key = GETKEY(tb, sc.lastobj);
+ key = GETKEY(tbl, sc.lastobj);
sz = size_object(key);
if (IS_USMALL(0, sc.replaced)) {
hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
@@ -1914,52 +2208,74 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
}
-static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret,
+ enum DbIterSafety safety)
+{
+ return db_select_replace_tree_common(p, tbl, tid, pattern, ret,
+ &tbl->tree, NULL);
+}
+
+int db_take_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */)
{
- DbTableTree *tb = &tbl->tree;
TreeDbTerm *this;
*ret = NIL;
- this = linkout_tree(tb, key);
+ this = linkout_tree(&tbl->common, root, key, stack);
if (this) {
Eterm copy, *hp, *hend;
hp = HAlloc(p, this->dbterm.size + 2);
hend = hp + this->dbterm.size + 2;
- copy = db_copy_object_from_ets(&tb->common,
+ copy = db_copy_object_from_ets(&tbl->common,
&this->dbterm, &hp, &MSO(p));
*ret = CONS(hp, copy, NIL);
hp += 2;
HRelease(p, hend, hp);
- free_term(tb, this);
+ free_term(tbl, this);
}
return DB_ERROR_NONE;
}
+static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ return db_take_tree_common(p, tbl, &tb->root,
+ key, ret, &tb->static_stack);
+}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
-
-/* Display tree contents (for dump) */
-static void db_print_tree(fmtfn_t to, void *to_arg,
- int show,
- DbTable *tbl)
+void db_print_tree_common(fmtfn_t to, void *to_arg,
+ int show, TreeDbTerm *root, DbTable *tbl)
{
- DbTableTree *tb = &tbl->tree;
#ifdef TREE_DEBUG
if (show)
erts_print(to, to_arg, "\nTree data dump:\n"
"------------------------------------------------\n");
- do_dump_tree2(&tbl->tree, to, to_arg, show, tb->root, 0);
+ do_dump_tree2(&tbl->common, to, to_arg, show, root, 0);
if (show)
erts_print(to, to_arg, "\n"
"------------------------------------------------\n");
#else
- erts_print(to, to_arg, "Ordered set (AVL tree), Elements: %d\n", NITEMS(tb));
+ erts_print(to, to_arg, "Ordered set (AVL tree), Elements: %d\n",
+ erts_flxctr_read_approx(&tbl->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID));
#endif
}
+/* Display tree contents (for dump) */
+static void db_print_tree(fmtfn_t to, void *to_arg,
+ int show,
+ DbTable *tbl)
+{
+ DbTableTree *tb = &tbl->tree;
+ db_print_tree_common(to, to_arg, show, tb->root, tbl);
+}
+
/* release all memory occupied by a single table */
static int db_free_empty_table_tree(DbTable *tbl)
{
@@ -1984,31 +2300,57 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
(DbTable *) tb,
(void *) tb->static_stack.array,
sizeof(TreeDbTerm *) * STACK_NEED);
- ASSERT(erts_atomic_read_nob(&tb->common.memory_size)
- == sizeof(DbTable));
+ ASSERT(erts_flxctr_is_snapshot_ongoing(&tb->common.counters) ||
+ ((APPROX_MEM_CONSUMED(tb)
+ == sizeof(DbTable)) ||
+ (APPROX_MEM_CONSUMED(tb)
+ == (sizeof(DbTable) + sizeof(DbFixation)))));
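+        /* The memory count is only checked when no flxctr snapshot is
+           ongoing, since a decentralized counter may be momentarily
+           inexact while a snapshot gathers its distributed parts. */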
}
return reds;
}
-static SWord db_delete_all_objects_tree(Process* p, DbTable* tbl, SWord reds)
+static SWord db_delete_all_objects_tree(Process* p,
+ DbTable* tbl,
+ SWord reds,
+ Eterm* nitems_holder_wb)
{
+ if (nitems_holder_wb != NULL) {
+ Uint nr_of_items =
+ erts_flxctr_read_centralized(&tbl->common.counters,
+ ERTS_DB_TABLE_NITEMS_COUNTER_ID);
+ *nitems_holder_wb = erts_make_integer(nr_of_items, p);
+ }
reds = db_free_table_continue_tree(tbl, reds);
if (reds < 0)
return reds;
db_create_tree(p, tbl);
- erts_atomic_set_nob(&tbl->tree.common.nitems, 0);
+ RESET_NITEMS(tbl);
return reds;
}
+static Eterm db_delete_all_objects_get_nitems_from_holder_tree(Process* p,
+ Eterm holder)
+{
+ (void)p;
+ return holder;
+}
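+/* For ordered_set tables the holder created by
+   db_delete_all_objects_tree() is already the integer item count, so
+   it is returned unchanged. */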
+
static void do_db_tree_foreach_offheap(TreeDbTerm *,
void (*)(ErlOffHeap *, void *),
void *);
+void db_foreach_offheap_tree_common(TreeDbTerm *root,
+ void (*func)(ErlOffHeap *, void *),
+ void * arg)
+{
+ do_db_tree_foreach_offheap(root, func, arg);
+}
+
static void db_foreach_offheap_tree(DbTable *tbl,
void (*func)(ErlOffHeap *, void *),
void * arg)
{
- do_db_tree_foreach_offheap(tbl->tree.root, func, arg);
+ db_foreach_offheap_tree_common(tbl->tree.root, func, arg);
}
@@ -2033,13 +2375,14 @@ do_db_tree_foreach_offheap(TreeDbTerm *tdbt,
do_db_tree_foreach_offheap(tdbt->right, func, arg);
}
-static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
+static TreeDbTerm *linkout_tree(DbTableCommon *tb, TreeDbTerm **root,
+ Eterm key, DbTreeStack *stack) {
TreeDbTerm **tstack[STACK_NEED];
int tpos = 0;
int dstack[STACK_NEED+1];
int dpos = 0;
int state = 0;
- TreeDbTerm **this = &tb->root;
+ TreeDbTerm **this = root;
Sint c;
int dir;
TreeDbTerm *q = NULL;
@@ -2050,7 +2393,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
* keep the balance. As in insert, we do the stacking ourselves.
*/
- reset_static_stack(tb);
+ reset_stack(stack);
dstack[dpos++] = DIR_END;
for (;;) {
if (!*this) { /* Failure */
@@ -2076,30 +2419,30 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
tstack[tpos++] = this;
state = delsub(this);
}
- erts_atomic_dec_nob(&tb->common.nitems);
+ DEC_NITEMS(((DbTable*)tb));
break;
}
}
while (state && ( dir = dstack[--dpos] ) != DIR_END) {
this = tstack[--tpos];
if (dir == DIR_LEFT) {
- state = balance_left(this);
+ state = tree_balance_left(this);
} else {
- state = balance_right(this);
+ state = tree_balance_right(this);
}
}
return q;
}
-static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
- Eterm object)
+static TreeDbTerm *linkout_object_tree(DbTableCommon *tb, TreeDbTerm **root,
+                                       Eterm object, DbTableTree *stack_container)
{
TreeDbTerm **tstack[STACK_NEED];
int tpos = 0;
int dstack[STACK_NEED+1];
int dpos = 0;
int state = 0;
- TreeDbTerm **this = &tb->root;
+ TreeDbTerm **this = root;
Sint c;
int dir;
TreeDbTerm *q = NULL;
@@ -2114,7 +2457,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
key = GETKEY(tb, tuple_val(object));
- reset_static_stack(tb);
+    reset_static_stack(stack_container);
dstack[dpos++] = DIR_END;
for (;;) {
if (!*this) { /* Failure */
@@ -2128,7 +2471,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
tstack[tpos++] = this;
this = &((*this)->right);
} else { /* Equal key, found the only possible matching object*/
- if (!db_eq(&tb->common,object,&(*this)->dbterm)) {
+ if (!db_eq(tb,object,&(*this)->dbterm)) {
return NULL;
}
q = (*this);
@@ -2143,16 +2486,16 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
tstack[tpos++] = this;
state = delsub(this);
}
- erts_atomic_dec_nob(&tb->common.nitems);
+ DEC_NITEMS(((DbTable*)tb));
break;
}
}
while (state && ( dir = dstack[--dpos] ) != DIR_END) {
this = tstack[--tpos];
if (dir == DIR_LEFT) {
- state = balance_left(this);
+ state = tree_balance_left(this);
} else {
- state = balance_right(this);
+ state = tree_balance_right(this);
}
}
return q;
@@ -2162,7 +2505,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
** For the select functions, analyzes the pattern and determines which
** part of the tree should be searched. Also compiles the match program
*/
-static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+static int analyze_pattern(DbTableCommon *tb, Eterm pattern,
extra_match_validator_t extra_validator, /* Optional callback */
struct mp_info *mpi)
{
@@ -2173,17 +2516,12 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
Eterm *ptpl;
int i;
int num_heads = 0;
- Eterm key;
- Eterm partly_bound;
- int res;
- Eterm least = 0;
- Eterm most = 0;
+ Eterm least = THE_NON_VALUE;
+ Eterm most = THE_NON_VALUE;
+ enum ms_key_boundness boundness;
- mpi->some_limitation = 1;
- mpi->got_partial = 0;
- mpi->something_can_match = 0;
+ mpi->key_boundness = MS_KEY_IMPOSSIBLE;
mpi->mp = NULL;
- mpi->save_term = NULL;
for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
++num_heads;
@@ -2204,6 +2542,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
Eterm match;
Eterm guard;
Eterm body;
+ Eterm key;
ttpl = CAR(list_val(lst));
if (!is_tuple(ttpl)) {
@@ -2223,7 +2562,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
- if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if(extra_validator != NULL && !extra_validator(tb->keypos, match, guard, body)) {
if (buff != sbuff) {
erts_free(ERTS_ALC_T_DB_TMP, buff);
}
@@ -2235,30 +2574,29 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
}
++i;
- partly_bound = NIL;
- res = key_given(tb, tpl, &(mpi->save_term), &partly_bound);
- if ( res >= 0 ) { /* Can match something */
- key = 0;
- mpi->something_can_match = 1;
- if (res > 0) {
- key = GETKEY(tb,tuple_val(tpl));
- } else if (partly_bound != NIL) {
- mpi->got_partial = 1;
- key = partly_bound;
- } else {
- mpi->some_limitation = 0;
- }
- if (key != 0) {
- if (least == 0 ||
- partly_bound_can_match_lesser(key,least)) {
- least = key;
- }
- if (most == 0 ||
- partly_bound_can_match_greater(key,most)) {
- most = key;
- }
- }
- }
+ boundness = key_boundness(tb, tpl, &key);
+ switch (boundness)
+ {
+ case MS_KEY_BOUND:
+ case MS_KEY_PARTIALLY_BOUND:
+ if (is_non_value(least) || partly_bound_can_match_lesser(key,least)) {
+ least = key;
+ }
+ if (is_non_value(most) || partly_bound_can_match_greater(key,most)) {
+ most = key;
+ }
+ break;
+ case MS_KEY_IMPOSSIBLE:
+ case MS_KEY_UNBOUND:
+ break;
+ }
+ if (mpi->key_boundness > boundness)
+ mpi->key_boundness = boundness;
+ }
+
+ if (mpi->key_boundness == MS_KEY_BOUND && !CMP_EQ(least, most)) {
+ /* Several different bound keys */
+ mpi->key_boundness = MS_KEY_PARTIALLY_BOUND;
}
mpi->least = least;
mpi->most = most;
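+
+    /* The running minimum above ("key_boundness > boundness") keeps the
+     * weakest boundness seen over all match heads: a single unbound head
+     * forces a full iteration no matter how bound the other heads are,
+     * while MS_KEY_IMPOSSIBLE (the initial value) acts as the identity.
+     * This assumes a numeric ordering of enum ms_key_boundness consistent
+     * with this code; a sketch, from least to most bound:
+     *
+     *   enum ms_key_boundness { MS_KEY_UNBOUND, MS_KEY_PARTIALLY_BOUND,
+     *                           MS_KEY_BOUND, MS_KEY_IMPOSSIBLE };
+     */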
@@ -2300,7 +2638,7 @@ static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
PUSH_NODE(&tb->static_stack, root);
root = p;
} else {
- free_term(tb, root);
+ free_term((DbTable*)tb, root);
if (--reds < 0) {
return reds; /* Done enough for now */
}
@@ -2314,7 +2652,7 @@ static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
/*
* Deletion helpers
*/
-static int balance_left(TreeDbTerm **this)
+int tree_balance_left(TreeDbTerm **this)
{
TreeDbTerm *p, *p1, *p2;
int b1, b2, h = 1;
@@ -2359,7 +2697,7 @@ static int balance_left(TreeDbTerm **this)
return h;
}
-static int balance_right(TreeDbTerm **this)
+int tree_balance_right(TreeDbTerm **this)
{
TreeDbTerm *p, *p1, *p2;
int b1, b2, h = 1;
@@ -2431,7 +2769,7 @@ static int delsub(TreeDbTerm **this)
h = 1;
while (tpos && h) {
r = tstack[--tpos];
- h = balance_right(r);
+ h = tree_balance_right(r);
}
return h;
}
@@ -2440,11 +2778,30 @@ static int delsub(TreeDbTerm **this)
* Helper for db_slot
*/
-static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot)
+static TreeDbTerm *slot_search(Process *p, TreeDbTerm *root,
+ Sint slot, DbTable *tb,
+ DbTableTree *stack_container,
+ CATreeRootIterator *iter,
+ int* is_EOT)
{
TreeDbTerm *this;
TreeDbTerm *tmp;
- DbTreeStack* stack = get_any_stack(tb);
+ TreeDbTerm *lastobj = NULL;
+ Eterm lastkey;
+ TreeDbTerm **pp;
+ DbTreeStack* stack;
+
+ if (iter) {
+ /* Find first non-empty tree */
+ while (!root) {
+ TreeDbTerm** pp = catree_find_next_root(iter, NULL);
+ if (!pp)
+ return NULL;
+ root = *pp;
+ }
+ }
+
+ stack = get_any_stack(tb,stack_container);
ASSERT(stack != NULL);
if (slot == 1) { /* Don't search from where we are if we are
@@ -2456,57 +2813,92 @@ static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot)
are not recorded */
stack->pos = 0;
}
- if (EMPTY_NODE(stack)) {
- this = tb->root;
- if (this == NULL)
- goto done;
- while (this->left != NULL){
- PUSH_NODE(stack, this);
- this = this->left;
- }
- PUSH_NODE(stack, this);
- stack->slot = 1;
- }
- this = TOP_NODE(stack);
- while (stack->slot != slot && this != NULL) {
- if (slot > stack->slot) {
- if (this->right != NULL) {
- this = this->right;
- while (this->left != NULL) {
- PUSH_NODE(stack, this);
- this = this->left;
- }
- PUSH_NODE(stack, this);
- } else {
- for (;;) {
- tmp = POP_NODE(stack);
- this = TOP_NODE(stack);
- if (this == NULL || this->left == tmp)
- break;
- }
- }
- ++(stack->slot);
- } else {
- if (this->left != NULL) {
- this = this->left;
- while (this->right != NULL) {
- PUSH_NODE(stack, this);
- this = this->right;
- }
- PUSH_NODE(stack, this);
- } else {
- for (;;) {
- tmp = POP_NODE(stack);
- this = TOP_NODE(stack);
- if (this == NULL || this->right == tmp)
- break;
- }
- }
- --(stack->slot);
- }
+ while (1) {
+ if (EMPTY_NODE(stack)) {
+ this = root;
+ if (this == NULL)
+ goto next_root;
+ while (this->left != NULL){
+ PUSH_NODE(stack, this);
+ this = this->left;
+ }
+ PUSH_NODE(stack, this);
+ stack->slot++;
+ }
+ this = TOP_NODE(stack);
+ while (stack->slot != slot) {
+ ASSERT(this);
+ lastobj = this;
+ if (slot > stack->slot) {
+ if (this->right != NULL) {
+ this = this->right;
+ while (this->left != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->left;
+ }
+ PUSH_NODE(stack, this);
+ } else {
+ for (;;) {
+ tmp = POP_NODE(stack);
+ this = TOP_NODE(stack);
+ if (!this)
+ goto next_root;
+ if (this->left == tmp)
+ break;
+ }
+ }
+ ++(stack->slot);
+ } else {
+ if (this->left != NULL) {
+ this = this->left;
+ while (this->right != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->right;
+ }
+ PUSH_NODE(stack, this);
+ } else {
+ for (;;) {
+ tmp = POP_NODE(stack);
+ this = TOP_NODE(stack);
+ if (!this)
+ goto next_root;
+ if (this->right == tmp)
+ break;
+ }
+ }
+ --(stack->slot);
+ }
+ }
+ /* Found slot */
+ ASSERT(this);
+ break;
+
+next_root:
+ if (!iter) {
+ if (stack->slot == (slot-1)) {
+ *is_EOT = 1;
+ }
+ break; /* EOT */
+ }
+
+ ASSERT(slot > stack->slot);
+ if (lastobj) {
+ lastkey = GETKEY(tb, lastobj->dbterm.tpl);
+ lastobj = NULL;
+ }
+ pp = catree_find_next_root(iter, &lastkey);
+ if (!pp) {
+ if (stack->slot == (slot-1)) {
+ *is_EOT = 1;
+ }
+ break; /* EOT */
+ }
+ root = *pp;
+ stack->pos = 0;
+ find_next(&tb->common, root, stack, lastkey);
}
-done:
- release_stack(tb,stack);
+
+ release_stack(tb,stack_container,stack);
return this;
}
@@ -2514,7 +2906,8 @@ done:
* Find next and previous in sort order
*/
-static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
+static TreeDbTerm *find_next(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key) {
TreeDbTerm *this;
TreeDbTerm *tmp;
Sint c;
@@ -2526,7 +2919,7 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
}
}
if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
@@ -2539,7 +2932,7 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
this = this->right;
} else if (c < 0) {
if (this->left == NULL) /* Done */
- return this;
+ goto found_next;
else
this = this->left;
} else
@@ -2554,8 +2947,6 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
this = this->left;
PUSH_NODE(stack, this);
}
- if (stack->slot > 0)
- ++(stack->slot);
} else {
do {
tmp = POP_NODE(stack);
@@ -2564,13 +2955,17 @@ static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
return NULL;
}
} while (this->right == tmp);
- if (stack->slot > 0)
- ++(stack->slot);
}
+
+found_next:
+ if (stack->slot > 0)
+ ++(stack->slot);
+
return this;
}
-static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
+static TreeDbTerm *find_prev(DbTableCommon *tb, TreeDbTerm *root,
+ DbTreeStack* stack, Eterm key) {
TreeDbTerm *this;
TreeDbTerm *tmp;
Sint c;
@@ -2582,7 +2977,7 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
}
}
if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
@@ -2595,7 +2990,7 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
this = this->left;
} else if (c > 0) {
if (this->right == NULL) /* Done */
- return this;
+ goto found_prev;
else
this = this->right;
} else
@@ -2610,8 +3005,6 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
this = this->right;
PUSH_NODE(stack, this);
}
- if (stack->slot > 0)
- --(stack->slot);
} else {
do {
tmp = POP_NODE(stack);
@@ -2620,74 +3013,112 @@ static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key) {
return NULL;
}
} while (this->left == tmp);
- if (stack->slot > 0)
- --(stack->slot);
}
+
+found_prev:
+ if (stack->slot > 0)
+ --(stack->slot);
+
return this;
}
-static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
- Eterm key)
+
+/* @brief Find the object with the smallest key that is larger than a
+ * partially bound key. Can be used as a starting point for a reverse
+ * iteration with pb_key.
+ *
+ * @param pb_key The partially bound key. Example: {42, '$1'}
+ * @param rootpp Out: pointer to the root pointer of the tree containing
+ *               the found object.
+ * @param iter Root iterator, or NULL for a plain DbTableTree.
+ * @param stack A stack to use. Will be cleared.
+ *
+ * @return The found object, or NULL if no such key exists.
+ */
+static TreeDbTerm *find_next_from_pb_key(DbTable *tbl, TreeDbTerm*** rootpp,
+ DbTreeStack* stack, Eterm pb_key,
+ CATreeRootIterator* iter)
{
+ TreeDbTerm* root;
TreeDbTerm *this;
- TreeDbTerm *tmp;
+ Uint candidate = 0;
Sint c;
+ if (iter) {
+ *rootpp = catree_find_next_from_pb_key_root(pb_key, iter);
+ ASSERT(*rootpp);
+ root = **rootpp;
+ }
+ else {
+ *rootpp = &tbl->tree.root;
+ root = tbl->tree.root;
+ }
+
    /* rewind the stack, we have to "re-search" */
stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
- if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl))) >= 0) {
+ if (( c = cmp_partly_bound(pb_key,GETKEY(tbl, this->dbterm.tpl))) >= 0) {
if (this->right == NULL) {
- do {
- tmp = POP_NODE(stack);
- if (( this = TOP_NODE(stack)) == NULL) {
- return NULL;
- }
- } while (this->right == tmp);
- return this;
- } else
- this = this->right;
+ stack->pos = candidate;
+ return TOP_NODE(stack);
+ }
+ this = this->right;
} else /*if (c < 0)*/ {
if (this->left == NULL) /* Done */
return this;
- else
- this = this->left;
+ candidate = stack->pos;
+ this = this->left;
}
}
}
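+/* Annotation on the "candidate" bookkeeping above: each time the search
+ * turns left (the current node's key is larger than pb_key), the stack
+ * position just after pushing that node is remembered. When the search
+ * bottoms out going right, the stack is truncated back to that position,
+ * so TOP_NODE() yields the last node whose key was larger than pb_key,
+ * i.e. the smallest such key on the search path. candidate == 0 means no
+ * such node was seen, and TOP_NODE() on the emptied stack returns NULL.
+ * The mirrored find_prev_from_pb_key() below uses the same trick with
+ * the directions swapped. */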
-static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
- Eterm key)
+/* @brief Find the object with the largest key that is smaller than a
+ * partially bound key. Can be used as a starting point for a forward
+ * iteration with pb_key.
+ *
+ * @param pb_key The partially bound key. Example: {42, '$1'}
+ * @param rootpp Out: pointer to the root pointer of the tree containing
+ *               the found object.
+ * @param iter Root iterator, or NULL for a plain DbTableTree.
+ * @param stack A stack to use. Will be cleared.
+ *
+ * @return The found object, or NULL if no such key exists.
+ */
+static TreeDbTerm *find_prev_from_pb_key(DbTable *tbl, TreeDbTerm*** rootpp,
+ DbTreeStack* stack, Eterm pb_key,
+ CATreeRootIterator* iter)
{
+ TreeDbTerm* root;
TreeDbTerm *this;
- TreeDbTerm *tmp;
+ Uint candidate = 0;
Sint c;
+ if (iter) {
+ *rootpp = catree_find_prev_from_pb_key_root(pb_key, iter);
+ ASSERT(*rootpp);
+ root = **rootpp;
+ }
+ else {
+ *rootpp = &tbl->tree.root;
+ root = tbl->tree.root;
+ }
+
    /* rewind the stack, we have to "re-search" */
stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL)
+ if (( this = root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, this);
- if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl))) <= 0) {
+ if (( c = cmp_partly_bound(pb_key,GETKEY(tbl, this->dbterm.tpl))) <= 0) {
if (this->left == NULL) {
- do {
- tmp = POP_NODE(stack);
- if (( this = TOP_NODE(stack)) == NULL) {
- return NULL;
- }
- } while (this->left == tmp);
- return this;
- } else
- this = this->left;
- } else /*if (c < 0)*/ {
+ stack->pos = candidate;
+ return TOP_NODE(stack);
+ }
+ this = this->left;
+ } else /*if (c > 0)*/ {
if (this->right == NULL) /* Done */
return this;
- else
- this = this->right;
+ candidate = stack->pos;
+ this = this->right;
}
}
}
@@ -2696,16 +3127,17 @@ static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
/*
* Just lookup a node
*/
-static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
+static TreeDbTerm *find_node(DbTableCommon *tb, TreeDbTerm *root,
+ Eterm key, DbTableTree *stack_container)
{
TreeDbTerm *this;
Sint res;
- DbTreeStack* stack = get_static_stack(tb);
+ DbTreeStack* stack = get_static_stack(stack_container);
if(!stack || EMPTY_NODE(stack)
|| !cmp_key_eq(tb, key, (this=TOP_NODE(stack)))) {
- this = tb->root;
+ this = root;
while (this != NULL && (res = cmp_key(tb,key,this)) != 0) {
if (res < 0)
this = this->left;
@@ -2714,7 +3146,7 @@ static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
}
}
if (stack) {
- release_stack(tb,stack);
+ release_stack((DbTable*)tb,stack_container,stack);
}
return this;
}
@@ -2722,12 +3154,12 @@ static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
/*
* Lookup a node and return the address of the node pointer in the tree
*/
-static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
+static TreeDbTerm **find_node2(DbTableCommon *tb, TreeDbTerm **root, Eterm key)
{
TreeDbTerm **this;
Sint res;
- this = &tb->root;
+ this = root;
while ((*this) != NULL && (res = cmp_key(tb, key, *this)) != 0) {
if (res < 0)
this = &((*this)->left);
@@ -2744,7 +3176,8 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
* Tries to reuse the existing stack for performance.
*/
-static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *this) {
+static TreeDbTerm **find_ptr(DbTableCommon *tb, TreeDbTerm **root,
+ DbTreeStack *stack, TreeDbTerm *this) {
Eterm key = GETKEY(tb, this->dbterm.tpl);
TreeDbTerm *tmp;
TreeDbTerm *parent;
@@ -2757,7 +3190,7 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th
}
}
if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
- if (( tmp = tb->root ) == NULL)
+ if (( tmp = *root ) == NULL)
return NULL;
for (;;) {
PUSH_NODE(stack, tmp);
@@ -2783,7 +3216,7 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th
parent = TOPN_NODE(stack, 1);
if (parent == NULL)
- return ((this != tb->root) ? NULL : &(tb->root));
+ return ((this != *root) ? NULL : root);
if (parent->left == this)
return &(parent->left);
if (parent->right == this)
@@ -2791,12 +3224,11 @@ static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *th
return NULL;
}
-static int
-db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
- DbUpdateHandle* handle)
+int db_lookup_dbterm_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm obj, DbUpdateHandle* handle,
+ DbTableTree *stack_container)
{
- DbTableTree *tb = &tbl->tree;
- TreeDbTerm **pp = find_node2(tb, key);
+ TreeDbTerm **pp = find_node2(&tbl->common, root, key);
int flags = 0;
if (pp == NULL) {
@@ -2807,18 +3239,19 @@ db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
int arity = arityval(*objp);
Eterm *htop, *hend;
- ASSERT(arity >= tb->common.keypos);
+ ASSERT(arity >= tbl->common.keypos);
htop = HAlloc(p, arity + 1);
hend = htop + arity + 1;
sys_memcpy(htop, objp, sizeof(Eterm) * (arity + 1));
- htop[tb->common.keypos] = key;
+ htop[tbl->common.keypos] = key;
obj = make_tuple(htop);
- if (db_put_tree(tbl, obj, 1) != DB_ERROR_NONE) {
+ if (db_put_tree_common(&tbl->common, root,
+ obj, 1, stack_container) != DB_ERROR_NONE) {
return 0;
}
- pp = find_node2(tb, key);
+ pp = find_node2(&tbl->common, root, key);
ASSERT(pp != NULL);
HRelease(p, hend, htop);
flags |= DB_NEW_OBJECT;
@@ -2833,21 +3266,28 @@ db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
return 1;
}
-static void
-db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
+static int
+db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
+ DbUpdateHandle* handle)
{
- DbTable *tbl = handle->tb;
DbTableTree *tb = &tbl->tree;
+ return db_lookup_dbterm_tree_common(p, tbl, &tb->root, key, obj, handle, tb);
+}
+
+void db_finalize_dbterm_tree_common(int cret, DbUpdateHandle *handle,
+ DbTableTree *stack_container)
+{
+ DbTable *tbl = handle->tb;
TreeDbTerm *bp = (TreeDbTerm *) *handle->bp;
if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
Eterm ret;
- db_erase_tree(tbl, GETKEY(tb, bp->dbterm.tpl), &ret);
+ db_erase_tree(tbl, GETKEY(&tbl->common, bp->dbterm.tpl), &ret);
} else if (handle->flags & DB_MUST_RESIZE) {
db_finalize_resize(handle, offsetof(TreeDbTerm,dbterm));
- reset_static_stack(tb);
+ reset_static_stack(stack_container);
- free_term(tb, bp);
+ free_term(tbl, bp);
}
#ifdef DEBUG
handle->dbterm = 0;
@@ -2855,156 +3295,207 @@ db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
return;
}
+static void
+db_finalize_dbterm_tree(int cret, DbUpdateHandle *handle)
+{
+ DbTable *tbl = handle->tb;
+ DbTableTree *tb = &tbl->tree;
+ db_finalize_dbterm_tree_common(cret, handle, tb);
+}
+
/*
* Traverse the tree with a callback function, used by db_match_xxx
*/
-static void traverse_backwards(DbTableTree *tb,
+static void traverse_backwards(DbTableCommon *tb,
DbTreeStack* stack,
Eterm lastkey,
- int (*doit)(DbTableTree *,
- TreeDbTerm *,
- void *,
- int),
- void *context)
+ traverse_doit_funcT* doit,
+ struct select_common *context,
+ CATreeRootIterator* iter)
{
TreeDbTerm *this, *next;
+ TreeDbTerm** root = context->root;
if (lastkey == THE_NON_VALUE) {
- stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL) {
- return;
- }
- while (this != NULL) {
- PUSH_NODE(stack, this);
- this = this->right;
- }
- this = TOP_NODE(stack);
- next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
- if (!((*doit)(tb, this, context, 0)))
- return;
+ if (iter) {
+ while (*root == NULL) {
+ root = catree_find_prev_root(iter, NULL);
+ if (!root)
+ return;
+ }
+ context->root = root;
+ }
+ stack->pos = stack->slot = 0;
+ next = *root;
+ while (next != NULL) {
+ PUSH_NODE(stack, next);
+ next = next->right;
+ }
+ next = TOP_NODE(stack);
} else {
- next = find_prev(tb, stack, lastkey);
+ next = find_prev(tb, *root, stack, lastkey);
}
- while ((this = next) != NULL) {
- next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
- if (!((*doit)(tb, this, context, 0)))
- return;
+ while (1) {
+ while (next) {
+ this = next;
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ next = find_prev(tb, *root, stack, lastkey);
+ if (!((*doit)(tb, this, context, 0)))
+ return;
+ }
+
+ if (!iter)
+ return;
+ ASSERT(is_value(lastkey));
+ root = catree_find_prev_root(iter, &lastkey);
+ if (!root)
+ return;
+ context->root = root;
+ stack->pos = stack->slot = 0;
+ next = find_prev(tb, *root, stack, lastkey);
}
}
/*
* Traverse the tree with a callback function, used by db_match_xxx
*/
-static void traverse_forward(DbTableTree *tb,
+static void traverse_forward(DbTableCommon *tb,
DbTreeStack* stack,
Eterm lastkey,
- int (*doit)(DbTableTree *,
- TreeDbTerm *,
- void *,
- int),
- void *context)
+ traverse_doit_funcT* doit,
+ struct select_common *context,
+ CATreeRootIterator* iter)
{
TreeDbTerm *this, *next;
+ TreeDbTerm **root = context->root;
if (lastkey == THE_NON_VALUE) {
- stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL) {
- return;
- }
- while (this != NULL) {
- PUSH_NODE(stack, this);
- this = this->left;
- }
- this = TOP_NODE(stack);
- next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
- if (!((*doit)(tb, this, context, 1)))
- return;
+ if (iter) {
+ while (*root == NULL) {
+ root = catree_find_next_root(iter, NULL);
+ if (!root)
+ return;
+ }
+ context->root = root;
+ }
+ stack->pos = stack->slot = 0;
+ next = *root;
+ while (next != NULL) {
+ PUSH_NODE(stack, next);
+ next = next->left;
+ }
+ next = TOP_NODE(stack);
} else {
- next = find_next(tb, stack, lastkey);
+ next = find_next(tb, *root, stack, lastkey);
}
- while ((this = next) != NULL) {
- next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
- if (!((*doit)(tb, this, context, 1)))
- return;
+ while (1) {
+ while (next) {
+ this = next;
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ next = find_next(tb, *root, stack, lastkey);
+ if (!((*doit)(tb, this, context, 1)))
+ return;
+ }
+
+ if (!iter)
+ return;
+ ASSERT(is_value(lastkey));
+ root = catree_find_next_root(iter, &lastkey);
+ if (!root)
+ return;
+ context->root = root;
+ stack->pos = stack->slot = 0;
+ next = find_next(tb, *root, stack, lastkey);
}
}
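+/* Both traversals above share the same two-level shape: the inner loop
+ * walks one base tree via find_next()/find_prev(), and, when a CATree
+ * root iterator is given, the outer loop hops to the next base tree root
+ * and resumes from lastkey with a freshly rebuilt stack. For a plain
+ * DbTableTree (iter == NULL) the outer loop runs exactly once. */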
/*
* Traverse the tree with an update callback function, used by db_select_replace
*/
-static void traverse_update_backwards(DbTableTree *tb,
+static void traverse_update_backwards(DbTableCommon *tb,
DbTreeStack* stack,
Eterm lastkey,
- int (*doit)(DbTableTree*,
+ int (*doit)(DbTableCommon*,
TreeDbTerm**,
- void*,
+ struct select_common*,
int),
- void* context)
+ struct select_common* context,
+ CATreeRootIterator* iter)
{
int res;
TreeDbTerm *this, *next, **this_ptr;
+ TreeDbTerm** root = context->root;
if (lastkey == THE_NON_VALUE) {
- stack->pos = stack->slot = 0;
- if (( this = tb->root ) == NULL) {
- return;
+ if (iter) {
+ while (*root == NULL) {
+ root = catree_find_prev_root(iter, NULL);
+ if (!root)
+ return;
+ context->root = root;
+ }
}
- while (this != NULL) {
- PUSH_NODE(stack, this);
- this = this->right;
+ stack->pos = stack->slot = 0;
+ next = *root;
+ while (next) {
+ PUSH_NODE(stack, next);
+ next = next->right;
}
- this = TOP_NODE(stack);
- this_ptr = find_ptr(tb, stack, this);
- ASSERT(this_ptr != NULL);
- res = (*doit)(tb, this_ptr, context, 0);
- REPLACE_TOP_NODE(stack, *this_ptr);
- next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
- if (!res)
- return;
- } else {
- next = find_prev(tb, stack, lastkey);
+ next = TOP_NODE(stack);
}
+ else
+ next = find_prev(tb, *root, stack, lastkey);
+
+
+ while (1) {
+ while (next) {
+ this = next;
+ this_ptr = find_ptr(tb, root, stack, this);
+ ASSERT(this_ptr != NULL);
+ res = (*doit)(tb, this_ptr, context, 0);
+ this = *this_ptr;
+ REPLACE_TOP_NODE(stack, this);
+ if (!res)
+ return;
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ next = find_prev(tb, *root, stack, lastkey);
+ }
- while ((this = next) != NULL) {
- this_ptr = find_ptr(tb, stack, this);
- ASSERT(this_ptr != NULL);
- res = (*doit)(tb, this_ptr, context, 0);
- REPLACE_TOP_NODE(stack, *this_ptr);
- next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
- if (!res)
+ if (!iter)
+ return;
+ ASSERT(is_value(lastkey));
+ root = catree_find_prev_root(iter, &lastkey);
+ if (!root)
return;
+ context->root = root;
+ stack->pos = stack->slot = 0;
+ next = find_prev(tb, *root, stack, lastkey);
}
}
-/*
- * Returns 0 if not given 1 if given and -1 on no possible match
- * if key is given; *ret is set to point to the object concerned.
- */
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
- Eterm *partly_bound)
+static enum ms_key_boundness key_boundness(DbTableCommon *tb,
+ Eterm pattern, Eterm *keyp)
{
- TreeDbTerm **this;
Eterm key;
- ASSERT(ret != NULL);
if (pattern == am_Underscore || db_is_variable(pattern) != -1)
- return 0;
- key = db_getkey(tb->common.keypos, pattern);
+ return MS_KEY_UNBOUND;
+ key = db_getkey(tb->keypos, pattern);
if (is_non_value(key))
- return -1; /* can't possibly match anything */
+ return MS_KEY_IMPOSSIBLE; /* can't possibly match anything */
if (!db_has_variable(key)) { /* Bound key */
- if (( this = find_node2(tb, key) ) == NULL) {
- return -1;
- }
- *ret = this;
- return 1;
- } else if (partly_bound != NULL && key != am_Underscore &&
- db_is_variable(key) < 0 && !db_has_map(key))
- *partly_bound = key;
+ *keyp = key;
+ return MS_KEY_BOUND;
+ } else if (key != am_Underscore &&
+ db_is_variable(key) < 0 && !db_has_map(key)) {
+
+ *keyp = key;
+ return MS_KEY_PARTIALLY_BOUND;
+ }
- return 0;
+ return MS_KEY_UNBOUND;
}
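+/* Illustrative classifications, assuming keypos == 1:
+ *   pattern {foo, '_'}        -> key foo        -> MS_KEY_BOUND
+ *   pattern {{42, '$1'}, '_'} -> key {42, '$1'} -> MS_KEY_PARTIALLY_BOUND
+ *   pattern {'$1', foo}       -> key '$1'       -> MS_KEY_UNBOUND
+ *   a pattern with no element at keypos         -> MS_KEY_IMPOSSIBLE
+ */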
@@ -3072,7 +3563,8 @@ static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done)
}
}
-static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key) {
+Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key)
+{
int done = 0;
Sint ret = do_cmp_partly_bound(partly_bound_key, bound_key, &done);
#ifdef HARDDEBUG
@@ -3118,7 +3610,7 @@ static int partly_bound_can_match_lesser(Eterm partly_bound_1,
if (ret)
erts_fprintf(stderr," can match lesser than ");
else
- erts_fprintf(stderr," can not match lesser than ");
+ erts_fprintf(stderr," cannot match lesser than ");
erts_fprintf(stderr,"%T\n",partly_bound_2);
#endif
return ret;
@@ -3136,7 +3628,7 @@ static int partly_bound_can_match_greater(Eterm partly_bound_1,
if (ret)
erts_fprintf(stderr," can match greater than ");
else
- erts_fprintf(stderr," can not match greater than ");
+ erts_fprintf(stderr," cannot match greater than ");
erts_fprintf(stderr,"%T\n",partly_bound_2);
#endif
return ret;
@@ -3288,7 +3780,8 @@ static int do_partly_bound_can_match_greater(Eterm a, Eterm b,
* Callback functions for the different match functions
*/
-static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select(DbTableCommon *tb, TreeDbTerm *this,
+ struct select_common* ptr,
int forward)
{
struct select_context *sc = (struct select_context *) ptr;
@@ -3306,7 +3799,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0))) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(tb, sc->p,sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
sc->accum = CONS(hp, ret, sc->accum);
}
@@ -3316,7 +3809,8 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
-static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select_count(DbTableCommon *tb, TreeDbTerm *this,
+ struct select_common* ptr,
int forward)
{
struct select_count_context *sc = (struct select_count_context *) ptr;
@@ -3330,7 +3824,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
++(sc->got);
}
@@ -3340,7 +3834,8 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
-static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select_chunk(DbTableCommon *tb, TreeDbTerm *this,
+ struct select_common* ptr,
int forward)
{
struct select_context *sc = (struct select_context *) ptr;
@@ -3359,7 +3854,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
++(sc->got);
sc->accum = CONS(hp, ret, sc->accum);
@@ -3371,7 +3866,8 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
}
-static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+static int doit_select_delete(DbTableCommon *tb, TreeDbTerm *this,
+ struct select_common *ptr,
int forward)
{
struct select_delete_context *sc = (struct select_delete_context *) ptr;
@@ -3379,7 +3875,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
Eterm key;
if (sc->erase_lastterm)
- free_term(tb, sc->lastterm);
+ free_term((DbTable*)tb, sc->lastterm);
sc->erase_lastterm = 0;
sc->lastterm = this;
@@ -3387,10 +3883,10 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
cmp_partly_bound(sc->end_condition,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)
return 0;
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
key = GETKEY(sc->tb, this->dbterm.tpl);
- linkout_tree(sc->tb, key);
+ linkout_tree(sc->tb, sc->common.root, key, sc->stack);
sc->erase_lastterm = 1;
++sc->accum;
}
@@ -3400,7 +3896,8 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
-static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
+static int doit_select_replace(DbTableCommon *tb, TreeDbTerm **this,
+ struct select_common* ptr,
int forward)
{
struct select_replace_context *sc = (struct select_replace_context *) ptr;
@@ -3414,13 +3911,13 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &(*this)->dbterm, NULL, 0);
+ ret = db_match_dbterm(tb, sc->p, sc->mp, &(*this)->dbterm, NULL, 0);
if (is_value(ret)) {
TreeDbTerm* new;
TreeDbTerm* old = *this;
#ifdef DEBUG
- Eterm key = db_getkey(tb->common.keypos, ret);
+ Eterm key = db_getkey(tb->keypos, ret);
ASSERT(is_value(key));
ASSERT(cmp_key(tb, key, old) == 0);
#endif
@@ -3430,7 +3927,7 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
new->balance = old->balance;
sc->lastobj = new->dbterm.tpl;
*this = new;
- free_term(tb, old);
+ free_term((DbTable*)tb, old);
++(sc->replaced);
}
if (--(sc->max) <= 0) {
@@ -3440,7 +3937,7 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
}
#ifdef TREE_DEBUG
-static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
+static void do_dump_tree2(DbTableCommon* tb, int to, void *to_arg, int show,
TreeDbTerm *t, int offset)
{
if (t == NULL)
@@ -3449,7 +3946,7 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
if (show) {
const char* prefix;
Eterm term;
- if (tb->common.compress) {
+ if (tb->compress) {
prefix = "key=";
term = GETKEY(tb, t->dbterm.tpl);
}
@@ -3504,7 +4001,7 @@ static void check_slot_pos(DbTableTree *tb)
"element position %d is really 0x%08X, when stack says "
"it's 0x%08X\n", tb->stack.slot, t,
tb->stack.array[tb->stack.pos - 1]);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
}
}
@@ -3519,14 +4016,14 @@ static void check_saved_stack(DbTableTree *tb)
if (t != stack->array[0]) {
erts_fprintf(stderr,"tb->stack[0] is 0x%08X, should be 0x%08X\n",
stack->array[0], t);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
return;
}
while (n < stack->pos) {
if (t == NULL) {
erts_fprintf(stderr, "NULL pointer in tree when stack not empty,"
" stack depth is %d\n", n);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
return;
}
n++;
@@ -3540,7 +4037,7 @@ static void check_saved_stack(DbTableTree *tb)
"represent child pointer in tree!"
"(left == 0x%08X, right == 0x%08X\n",
n, tb->stack[n], t->left, t->right);
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
return;
}
}
@@ -3559,7 +4056,7 @@ static int check_table_tree(DbTableTree* tb, TreeDbTerm *t)
erts_fprintf(stderr,"balance = %d, left = 0x%08X, right = 0x%08X\n",
t->balance, t->left, t->right);
erts_fprintf(stderr,"\nDump:\n---------------------------------\n");
- do_dump_tree2(tb, ERTS_PRINT_STDERR, NULL, 1, t, 0);
+ do_dump_tree2(&tb->common, ERTS_PRINT_STDERR, NULL, 1, t, 0);
erts_fprintf(stderr,"\n---------------------------------\n");
}
return ((rh > lh) ? rh : lh) + 1;
diff --git a/erts/emulator/beam/erl_db_tree_util.h b/erts/emulator/beam/erl_db_tree_util.h
new file mode 100644
index 0000000000..ba4a8f79e5
--- /dev/null
+++ b/erts/emulator/beam/erl_db_tree_util.h
@@ -0,0 +1,174 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _DB_TREE_UTIL_H
+#define _DB_TREE_UTIL_H
+
+/*
+** Internal functions and macros used by both the CA tree and the AVL tree
+*/
+
+
+#if defined(ARCH_32)
+/*
+** A stack of this size is enough for an AVL tree with more than
+** 0xFFFFFFFF elements. May be subject to change if
+** the datatype of the element counter is changed to a 64-bit integer.
+** The maximal height of an AVL tree is bounded by:
+** h(n) <= 1.4404 * log(n + 2) - 0.328
+** where n denotes the number of nodes, h(n) the height of a tree
+** with n nodes, and log is the binary logarithm.
+*/
+#define STACK_NEED 50
+#elif defined(ARCH_64)
+/*
+** A stack of this size is enough for an AVL tree with more than
+** 2^61 elements.
+** The maximal height of an AVL tree is bounded as above.
+*/
+#define STACK_NEED 90
+#else
+#error "Unsported architecture"
+#endif
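+
+/* Quick check of the bound above: with n = 2^32,
+** h(n) <= 1.4404 * 32 - 0.328, roughly 45.8, so 50 slots suffice on
+** 32-bit; with n = 2^61, h(n) <= 1.4404 * 61 - 0.328, roughly 87.5,
+** so 90 slots suffice on 64-bit. */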
+
+
+
+#define PUSH_NODE(Dtt, Tdt) \
+ ((Dtt)->array[(Dtt)->pos++] = Tdt)
+
+#define POP_NODE(Dtt) \
+ (((Dtt)->pos) ? \
+ (Dtt)->array[--((Dtt)->pos)] : NULL)
+
+#define TOP_NODE(Dtt) \
+ ((Dtt->pos) ? \
+ (Dtt)->array[(Dtt)->pos - 1] : NULL)
+
+#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
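+
+/* Minimal usage sketch of the stack macros above (illustrative only):
+ * descend to the leftmost node while recording the path, so in-order
+ * successors can later be found by popping:
+ *
+ *   for (this = root; this != NULL; this = this->left)
+ *       PUSH_NODE(stack, this);
+ *   this = TOP_NODE(stack);   -- leftmost node, i.e. smallest key
+ */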
+
+#define DEC_NITEMS(DB) \
+ erts_flxctr_dec(&(DB)->common.counters, ERTS_DB_TABLE_NITEMS_COUNTER_ID)
+
+static ERTS_INLINE void free_term(DbTable *tb, TreeDbTerm* p)
+{
+ db_free_term(tb, p, offsetof(TreeDbTerm, dbterm));
+}
+
+/*
+** Some macros for "direction stacks"
+*/
+#define DIR_LEFT 0
+#define DIR_RIGHT 1
+#define DIR_END 2
+
+static ERTS_INLINE Sint cmp_key(DbTableCommon* tb, Eterm key, TreeDbTerm* obj) {
+ return CMP(key, GETKEY(tb,obj->dbterm.tpl));
+}
+
+int tree_balance_left(TreeDbTerm **this);
+int tree_balance_right(TreeDbTerm **this);
+
+int db_first_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container);
+int db_next_tree_common(Process *p, DbTable *tbl,
+ TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack);
+int db_last_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm *ret, DbTableTree *stack_container);
+int db_prev_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTreeStack* stack);
+int db_put_tree_common(DbTableCommon *tb, TreeDbTerm **root, Eterm obj,
+ int key_clash_fail, DbTableTree *stack_container);
+int db_get_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ Eterm *ret, DbTableTree *stack_container);
+int db_get_element_tree_common(Process *p, DbTableCommon *tb, TreeDbTerm *root, Eterm key,
+ int ndex, Eterm *ret, DbTableTree *stack_container);
+int db_member_tree_common(DbTableCommon *tb, TreeDbTerm *root, Eterm key, Eterm *ret,
+ DbTableTree *stack_container);
+int db_erase_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */);
+int db_erase_object_tree_common(DbTable *tbl, TreeDbTerm **root, Eterm object,
+ Eterm *ret, DbTableTree *stack_container);
+int db_slot_tree_common(Process *p, DbTable *tbl, TreeDbTerm *root,
+ Eterm slot_term, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator*);
+int db_select_chunk_tree_common(Process *p, DbTable *tb,
+ Eterm tid, Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator*);
+int db_select_tree_common(Process *p, DbTable *tb,
+ Eterm tid, Eterm pattern, int reverse, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator*);
+int db_select_delete_tree_common(Process *p, DbTable *tbl,
+ Eterm tid, Eterm pattern,
+ Eterm *ret,
+ DbTreeStack* stack,
+ CATreeRootIterator* iter);
+int db_select_continue_tree_common(Process *p,
+ DbTableCommon *tb,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter);
+int db_select_delete_continue_tree_common(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret,
+ DbTreeStack* stack,
+ CATreeRootIterator* iter);
+int db_select_count_tree_common(Process *p, DbTable *tb,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter);
+int db_select_count_continue_tree_common(Process *p,
+ DbTable *tb,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter);
+int db_select_replace_tree_common(Process *p, DbTable*,
+ Eterm tid, Eterm pattern, Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter);
+int db_select_replace_continue_tree_common(Process *p,
+ DbTable*,
+ Eterm continuation,
+ Eterm *ret,
+ DbTableTree *stack_container,
+ CATreeRootIterator* iter);
+int db_take_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm *ret,
+ DbTreeStack *stack /* NULL if no static stack */);
+void db_print_tree_common(fmtfn_t to, void *to_arg,
+ int show, TreeDbTerm *root, DbTable *tbl);
+void db_foreach_offheap_tree_common(TreeDbTerm *root,
+ void (*func)(ErlOffHeap *, void *),
+ void * arg);
+int db_lookup_dbterm_tree_common(Process *p, DbTable *tbl, TreeDbTerm **root,
+ Eterm key, Eterm obj, DbUpdateHandle* handle,
+ DbTableTree *stack_container);
+void db_finalize_dbterm_tree_common(int cret, DbUpdateHandle *handle,
+ DbTableTree *stack_container);
+Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key);
+
+#endif /* _DB_TREE_UTIL_H */
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index e2c029c244..1ea7074d21 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -497,6 +497,7 @@ static erts_atomic32_t trace_control_word;
/* This needs to be here, before the bif table... */
static Eterm db_set_trace_control_word_fake_1(BIF_ALIST_1);
+static Eterm db_length_1(BIF_ALIST_1);
/*
** The table of callable bif's, i e guard bif's and
@@ -603,7 +604,7 @@ static DMCGuardBif guard_tab[] =
},
{
am_length,
- &length_1,
+ &db_length_1,
1,
DBIF_ALL
},
@@ -971,6 +972,26 @@ BIF_RETTYPE db_set_trace_control_word_1(BIF_ALIST_1)
BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_1));
}
+/*
+ * Implementation of length/1 for match specs (non-trapping).
+ */
+static Eterm db_length_1(BIF_ALIST_1)
+{
+ Eterm list;
+ Uint i;
+
+ list = BIF_ARG_1;
+ i = 0;
+ while (is_list(list)) {
+ i++;
+ list = CDR(list_val(list));
+ }
+ if (is_not_nil(list)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(make_small(i));
+}
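+
+/*
+ * Why a private length: guards inside a compiled match program cannot
+ * trap, so the guard BIF table earlier in this file now binds am_length
+ * to this simple non-trapping loop instead of the regular length/1 BIF
+ * (which, presumably, may yield on long lists). Illustrative match spec
+ * using the guard:
+ *
+ *   ets:select(T, [{ {'$1','_'}, [{'==',{length,'$1'},2}], ['$_'] }]).
+ */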
+
static Eterm db_set_trace_control_word_fake_1(BIF_ALIST_1)
{
Process *p = BIF_P;
@@ -3118,9 +3139,7 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
Uint new_sz = offset + db_size_dbterm_comp(tb, obj);
byte* basep;
DbTerm* newp;
-#ifdef DEBUG
byte* top;
-#endif
ASSERT(tb->compress);
if (old != 0) {
@@ -3142,11 +3161,8 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
}
newp->size = size_object(obj);
-#ifdef DEBUG
- top =
-#endif
- copy_to_comp(tb, obj, newp, new_sz);
- ASSERT(top <= basep + new_sz);
+ top = copy_to_comp(tb, obj, newp, new_sz);
+ ASSERT(top <= basep + new_sz); (void)top;
/* ToDo: Maybe realloc if ((basep+new_sz) - top) > WASTED_SPACE_LIMIT */
@@ -3296,7 +3312,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
default:
ASSERT(is_external_header(u.hdr->thing_word));
ASSERT(u.pb != &tmp);
- erts_deref_node_entry(u.ext->node);
+ erts_deref_node_entry(u.ext->node, make_boxed(u.ep));
break;
}
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 6ec3b4f98f..02d4dd6c9a 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -21,6 +21,7 @@
#ifndef _DB_UTIL_H
#define _DB_UTIL_H
+#include "erl_flxctr.h"
#include "global.h"
#include "erl_message.h"
#include "erl_bif_unique.h"
@@ -89,9 +90,26 @@ typedef struct {
void** bp; /* {Hash|Tree}DbTerm** */
Uint new_size;
int flags;
- void* lck;
+ union {
+ struct {
+ erts_rwmtx_t* lck;
+ } hash;
+ struct {
+ struct DbTableCATreeNode* base_node;
+ struct DbTableCATreeNode* parent;
+ int current_level;
+ } catree;
+ } u;
} DbUpdateHandle;
+/* How safe are we from double-hits or missed objects
+ * when iterating without fixation?
+ */
+enum DbIterSafety {
+ ITER_UNSAFE, /* Must fixate to be safe */
+ ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */
+ ITER_SAFE /* No need to fixate at all */
+};
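+
+/* Illustrative use (a sketch, not part of this header): a select-like
+ * operation only needs to fixate the table when the safety level demands
+ * it, e.g.
+ *
+ *   if (safety == ITER_UNSAFE ||
+ *       (safety == ITER_SAFE_LOCKED && operation_will_trap))
+ *       fixate_table(tb);   -- fixate_table is a hypothetical stand-in
+ */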
typedef struct db_table_method
{
@@ -141,48 +159,61 @@ typedef struct db_table_method
Eterm pattern,
Sint chunk_size,
int reverse,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety);
int (*db_select)(Process* p,
DbTable* tb, /* [in out] */
Eterm tid,
Eterm pattern,
int reverse,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety);
int (*db_select_delete)(Process* p,
DbTable* tb, /* [in out] */
Eterm tid,
Eterm pattern,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety);
int (*db_select_continue)(Process* p,
DbTable* tb, /* [in out] */
Eterm continuation,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety*);
int (*db_select_delete_continue)(Process* p,
DbTable* tb, /* [in out] */
Eterm continuation,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety*);
int (*db_select_count)(Process* p,
DbTable* tb, /* [in out] */
Eterm tid,
Eterm pattern,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety);
int (*db_select_count_continue)(Process* p,
DbTable* tb, /* [in out] */
Eterm continuation,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety*);
int (*db_select_replace)(Process* p,
DbTable* tb, /* [in out] */
Eterm tid,
Eterm pattern,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety);
int (*db_select_replace_continue)(Process* p,
DbTable* tb, /* [in out] */
Eterm continuation,
- Eterm* ret);
+ Eterm* ret,
+ enum DbIterSafety*);
int (*db_take)(Process *, DbTable *, Eterm, Eterm *);
- SWord (*db_delete_all_objects)(Process* p, DbTable* db, SWord reds);
-
+ SWord (*db_delete_all_objects)(Process* p,
+ DbTable* db,
+ SWord reds,
+ Eterm* nitems_holder_wb);
+ Eterm (*db_delete_all_objects_get_nitems_from_holder)(Process* p,
+ Eterm nitems_holder);
int (*db_free_empty_table)(DbTable* db);
SWord (*db_free_table_continue)(DbTable* db, SWord reds);
@@ -231,6 +262,9 @@ typedef struct {
DbTable *prev;
} DbTableList;
+#define ERTS_DB_TABLE_NITEMS_COUNTER_ID 0
+#define ERTS_DB_TABLE_MEM_COUNTER_ID 1
+
/*
* This structure contains data for all different types of database
* tables. Note that these fields must match the same fields
@@ -255,8 +289,11 @@ typedef struct db_table_common {
Eterm the_name; /* an atom */
Binary *btid;
DbTableMethod* meth; /* table methods */
- erts_atomic_t nitems; /* Total number of items in table */
- erts_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
+ /* The ErtsFlxCtr below contains:
+ * - Total number of items in table
+ * - Total memory size (NOTE: in bytes!) */
+ ErtsFlxCtr counters;
+ char extra_for_flxctr[ERTS_FLXCTR_NR_OF_EXTRA_BYTES(2)];
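+    /* Two counters (nitems and memory size), so one erts_atomic_t lives
+     * inside 'counters' and ERTS_FLXCTR_NR_OF_EXTRA_BYTES(2) reserves
+     * space for the second one directly after it (see erl_flxctr.h). */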
struct { /* Last fixation time */
ErtsMonotonicTime monotonic;
ErtsMonotonicTime offset;
@@ -274,23 +311,29 @@ typedef struct db_table_common {
} DbTableCommon;
/* These are status bit patterns */
-#define DB_PRIVATE (1 << 0)
-#define DB_PROTECTED (1 << 1)
-#define DB_PUBLIC (1 << 2)
-#define DB_DELETE (1 << 3) /* table is being deleted */
-#define DB_SET (1 << 4)
-#define DB_BAG (1 << 5)
-#define DB_DUPLICATE_BAG (1 << 6)
-#define DB_ORDERED_SET (1 << 7)
-#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */
-#define DB_FREQ_READ (1 << 9) /* read_concurrency */
-#define DB_NAMED_TABLE (1 << 10)
-#define DB_BUSY (1 << 11)
+#define DB_PRIVATE (1 << 0)
+#define DB_PROTECTED (1 << 1)
+#define DB_PUBLIC (1 << 2)
+#define DB_DELETE (1 << 3) /* table is being deleted */
+#define DB_SET (1 << 4)
+#define DB_BAG (1 << 5)
+#define DB_DUPLICATE_BAG (1 << 6)
+#define DB_ORDERED_SET (1 << 7)
+#define DB_CA_ORDERED_SET (1 << 8)
+#define DB_FINE_LOCKED (1 << 9) /* write_concurrency */
+#define DB_FREQ_READ (1 << 10) /* read_concurrency */
+#define DB_NAMED_TABLE (1 << 11)
+#define DB_BUSY (1 << 12)
+
+#define DB_CATREE_FORCE_SPLIT (1 << 31) /* erts_debug */
+#define DB_CATREE_DEBUG_RANDOM_SPLIT_JOIN (1 << 30) /* erts_debug */
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
+#define IS_CATREE_TABLE(Status) (!!((Status) & \
+ DB_CA_ORDERED_SET))
#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
index 20299ff604..656acfebdb 100644
--- a/erts/emulator/beam/erl_dirty_bif.tab
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -57,9 +57,6 @@ dirty-cpu erts_debug:lcnt_clear/0
# and debug purposes only. We really do *not* want to execute these
# on dirty schedulers on a real system.
-dirty-cpu-test erlang:'++'/2
-dirty-cpu-test erlang:append/2
-dirty-cpu-test erlang:iolist_size/1
dirty-cpu-test erlang:make_tuple/2
dirty-cpu-test erlang:make_tuple/3
dirty-cpu-test erlang:append_element/2
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 31b4817fb1..a5ecbfff06 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -53,7 +53,9 @@ typedef enum {
enum ErlNifSelectFlags {
ERL_NIF_SELECT_READ = (1 << 0),
ERL_NIF_SELECT_WRITE = (1 << 1),
- ERL_NIF_SELECT_STOP = (1 << 2)
+ ERL_NIF_SELECT_STOP = (1 << 2),
+ ERL_NIF_SELECT_CANCEL = (1 << 3),
+ ERL_NIF_SELECT_CUSTOM_MSG= (1 << 4)
};
/*
diff --git a/erts/emulator/beam/erl_flxctr.c b/erts/emulator/beam/erl_flxctr.c
new file mode 100644
index 0000000000..35f4a21508
--- /dev/null
+++ b/erts/emulator/beam/erl_flxctr.c
@@ -0,0 +1,370 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2019. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Kjell Winblad
+ */
+
+#include "erl_flxctr.h"
+
+static int reader_groups_array_size = 0;
+#define ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS (reader_groups_array_size)
+
+static int erts_flxctr_read_ctx_bin_dtor(Binary *context_bin);
+static int erts_flxctr_wait_dtor(Binary *context_bin);
+
+typedef struct {
+ ErtsThrPrgrLaterOp later_op;
+ Process* process;
+ ErtsFlxCtrDecentralizedCtrArray* array;
+ ErtsFlxCtrDecentralizedCtrArray* next_array;
+ ErtsAlcType_t alloc_type;
+ int nr_of_counters;
+ Sint result[ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE];
+} DecentralizedReadSnapshotInfo;
+
+typedef enum {
+ ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING = 0,
+ ERTS_FLXCTR_SNAPSHOT_ONGOING = 1,
+ ERTS_FLXCTR_SNAPSHOT_ONGOING_TP_THREAD_DO_FREE = 2
+} erts_flxctr_snapshot_status;
+
+static void
+thr_prg_wake_up_and_count(void* bin_p)
+{
+ Binary* bin = bin_p;
+ DecentralizedReadSnapshotInfo* info = ERTS_MAGIC_BIN_DATA(bin);
+ Process* p = info->process;
+ ErtsFlxCtrDecentralizedCtrArray* array = info->array;
+ ErtsFlxCtrDecentralizedCtrArray* next = info->next_array;
+ int i, sched;
+ /* Reset result array */
+ for (i = 0; i < info->nr_of_counters; i++) {
+ info->result[i] = 0;
+ }
+ /* Read result from snapshot */
+ for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
+ for (i = 0; i < info->nr_of_counters; i++) {
+ info->result[i] = info->result[i] +
+ erts_atomic_read_nob(&array->array[sched].counters[i]);
+ }
+ }
+ /* Update the next decentralized counter array */
+ for (i = 0; i < info->nr_of_counters; i++) {
+ erts_atomic_add_nob(&next->array[0].counters[i], info->result[i]);
+ }
+ /* Announce that the snapshot is done */
+ {
+ Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
+ if (expected != erts_atomic_cmpxchg_mb(&next->snapshot_status,
+ ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING,
+ expected)) {
+            /* The CAS failed, which means that this thread needs to free the next array. */
+ erts_free(info->alloc_type, next->block_start);
+ }
+ }
+ /* Resume the process that requested the snapshot */
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ /* Free the memory that is no longer needed */
+ erts_free(info->alloc_type, array->block_start);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_dec_refc(p);
+ erts_bin_release(bin);
+}
+
+typedef struct {
+ ErtsThrPrgrLaterOp later_op;
+ Process* process;
+} ErtsFlxCtrWakeUpLaterInfo;
+
+static void
+thr_prg_wake_up_later(void* bin_p)
+{
+ Binary* bin = bin_p;
+ ErtsFlxCtrWakeUpLaterInfo* info = ERTS_MAGIC_BIN_DATA(bin);
+ Process* p = info->process;
+ /* Resume the requesting process */
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ /* Free data */
+ erts_proc_dec_refc(p);
+ erts_bin_release(bin);
+}
+
+static
+int erts_flxctr_read_ctx_bin_dtor(Binary *context_bin) {
+ (void)context_bin;
+ return 1;
+}
+
+static
+int erts_flxctr_wait_dtor(Binary *context_bin) {
+ (void)context_bin;
+ return 1;
+}
+
+static void suspend_until_thr_prg(Process* p)
+{
+ Binary* state_bin;
+ ErtsFlxCtrWakeUpLaterInfo* info;
+ state_bin = erts_create_magic_binary(sizeof(ErtsFlxCtrWakeUpLaterInfo),
+ erts_flxctr_wait_dtor);
+ info = ERTS_MAGIC_BIN_DATA(state_bin);
+ info->process = p;
+ erts_refc_inctest(&state_bin->intern.refc, 1);
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
+ erts_proc_inc_refc(p);
+ ERTS_VBUMP_ALL_REDS(p);
+ erts_schedule_thr_prgr_later_op(thr_prg_wake_up_later, state_bin, &info->later_op);
+}
+
+
+static ErtsFlxCtrDecentralizedCtrArray*
+create_decentralized_ctr_array(ErtsAlcType_t alloc_type, Uint nr_of_counters) {
+ /* Allocate an ErtsFlxCtrDecentralizedCtrArray and make sure that
+ the array field is located at the start of a cache line */
+ char* bytes =
+ erts_alloc(alloc_type,
+ sizeof(ErtsFlxCtrDecentralizedCtrArray) +
+ (sizeof(ErtsFlxCtrDecentralizedCtrArrayElem) *
+ ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS) +
+ ERTS_CACHE_LINE_SIZE);
+ void* block_start = bytes;
+ int bytes_to_next_cacheline_border;
+ ErtsFlxCtrDecentralizedCtrArray* array;
+ int i, sched;
+ bytes = &bytes[offsetof(ErtsFlxCtrDecentralizedCtrArray, array)];
+ bytes_to_next_cacheline_border =
+ ERTS_CACHE_LINE_SIZE - (((Uint)bytes) % ERTS_CACHE_LINE_SIZE);
+ array = (ErtsFlxCtrDecentralizedCtrArray*)
+ (&bytes[bytes_to_next_cacheline_border -
+ (int)offsetof(ErtsFlxCtrDecentralizedCtrArray, array)]);
+ ASSERT(((Uint)array->array) % ERTS_CACHE_LINE_SIZE == 0);
+ ASSERT(((Uint)array - (Uint)block_start) <= ERTS_CACHE_LINE_SIZE);
+ /* Initialize fields */
+ erts_atomic_init_nob(&array->snapshot_status, ERTS_FLXCTR_SNAPSHOT_ONGOING);
+ for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
+ for (i = 0; i < nr_of_counters; i++) {
+ erts_atomic_init_nob(&array->array[sched].counters[i], 0);
+ }
+ }
+ array->block_start = block_start;
+ return array;
+}
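+
+/* Worked example of the alignment logic above, assuming a 64-byte cache
+ * line: if the array field would land at an address with (addr % 64) == 8,
+ * then bytes_to_next_cacheline_border == 56 and the whole struct is moved
+ * forward so that the field starts exactly on the next line boundary.
+ * The extra ERTS_CACHE_LINE_SIZE bytes in the allocation guarantee that
+ * the shift (always in (0, ERTS_CACHE_LINE_SIZE]) stays inside the
+ * allocated block. */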
+
+void erts_flxctr_setup(int decentralized_counter_groups)
+{
+ reader_groups_array_size = decentralized_counter_groups+1;
+}
+
+void erts_flxctr_init(ErtsFlxCtr* c,
+ int is_decentralized,
+ Uint nr_of_counters,
+ ErtsAlcType_t alloc_type)
+{
+ ASSERT(nr_of_counters <= ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE);
+ c->is_decentralized = is_decentralized;
+ c->nr_of_counters = nr_of_counters;
+ if (c->is_decentralized) {
+ ErtsFlxCtrDecentralizedCtrArray* array =
+ create_decentralized_ctr_array(alloc_type, nr_of_counters);
+ erts_atomic_set_nob(&array->snapshot_status,
+ ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING);
+ erts_atomic_init_nob(&c->u.counters_ptr, (Sint)array);
+ ASSERT(((Uint)array->array) % ERTS_CACHE_LINE_SIZE == 0);
+ } else {
+ int i;
+ for (i = 0; i < nr_of_counters; i++) {
+ erts_atomic_init_nob(&c->u.counters[i], 0);
+ }
+ }
+}
+
+void erts_flxctr_destroy(ErtsFlxCtr* c, ErtsAlcType_t type)
+{
+ if (c->is_decentralized) {
+ if (erts_flxctr_is_snapshot_ongoing(c)) {
+ ErtsFlxCtrDecentralizedCtrArray* array =
+ ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c);
+            /* Try to delegate the responsibility of freeing to
+               thr_prg_wake_up_and_count */
+ Sint expected = ERTS_FLXCTR_SNAPSHOT_ONGOING;
+ if (expected !=
+ erts_atomic_cmpxchg_mb(&array->snapshot_status,
+ ERTS_FLXCTR_SNAPSHOT_ONGOING_TP_THREAD_DO_FREE,
+ expected)) {
+ /* The delegation was unsuccessful which means that no
+ snapshot is ongoing anymore and the freeing needs
+ to be done here */
+ ERTS_ASSERT(!erts_flxctr_is_snapshot_ongoing(c));
+ erts_free(type, array->block_start);
+ }
+ } else {
+ erts_free(type, ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c)->block_start);
+ }
+ }
+}
+
+ErtsFlxCtrSnapshotResult
+erts_flxctr_snapshot(ErtsFlxCtr* c,
+ ErtsAlcType_t alloc_type,
+ Process* p)
+{
+ if (c->is_decentralized) {
+ ErtsFlxCtrDecentralizedCtrArray* array = ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c);
+ if (erts_flxctr_is_snapshot_ongoing(c)) {
+ /* Let the caller try again later */
+ ErtsFlxCtrSnapshotResult res =
+ {.type = ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP};
+ suspend_until_thr_prg(p);
+ return res;
+ } else {
+ Eterm* hp;
+ Binary* state_bin;
+ Eterm state_mref;
+ DecentralizedReadSnapshotInfo* info;
+ ErtsFlxCtrDecentralizedCtrArray* new_array =
+ create_decentralized_ctr_array(alloc_type, c->nr_of_counters);
+ int success =
+ ((Sint)array) == erts_atomic_cmpxchg_mb(&c->u.counters_ptr,
+ (Sint)new_array,
+ (Sint)array);
+ if (!success) {
+ /* Let the caller try again later */
+ ErtsFlxCtrSnapshotResult res =
+ {.type = ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP};
+ suspend_until_thr_prg(p);
+ erts_free(alloc_type, new_array->block_start);
+ return res;
+ }
+ /* Create binary with info about the operation that can be
+ sent to the caller and to a thread progress function */
+ state_bin =
+ erts_create_magic_binary(sizeof(DecentralizedReadSnapshotInfo),
+ erts_flxctr_read_ctx_bin_dtor);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ state_mref = erts_mk_magic_ref(&hp, &MSO(p), state_bin);
+ info = ERTS_MAGIC_BIN_DATA(state_bin);
+ info->alloc_type = alloc_type;
+ info->array = array;
+ info->next_array = new_array;
+ info->process = p;
+ info->nr_of_counters = c->nr_of_counters;
+ erts_proc_inc_refc(p);
+ erts_refc_inctest(&state_bin->intern.refc, 2);
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_VBUMP_ALL_REDS(p);
+ erts_schedule_thr_prgr_later_op(thr_prg_wake_up_and_count,
+ state_bin,
+ &info->later_op);
+ {
+ ErtsFlxCtrSnapshotResult res = {
+ .type = ERTS_FLXCTR_GET_RESULT_AFTER_TRAP,
+ .trap_resume_state = state_mref};
+ return res;
+ }
+ }
+ } else {
+ ErtsFlxCtrSnapshotResult res;
+ int i;
+ res.type = ERTS_FLXCTR_DONE;
+ for (i = 0; i < c->nr_of_counters; i++){
+ res.result[i] = erts_flxctr_read_centralized(c, i);
+ }
+ return res;
+ }
+}
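+
+/* Illustrative caller sketch (hypothetical names, not part of this patch):
+ *
+ *   ErtsFlxCtrSnapshotResult r =
+ *       erts_flxctr_snapshot(&tb->common.counters, ERTS_ALC_T_DB_TABLE, p);
+ *   switch (r.type) {
+ *   case ERTS_FLXCTR_DONE:
+ *       return make_small(r.result[counter_nr]);
+ *   case ERTS_FLXCTR_GET_RESULT_AFTER_TRAP:
+ *       -- trap; the continuation reads the result with
+ *       -- erts_flxctr_get_snapshot_result_after_trap(r.trap_resume_state, ...)
+ *   case ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP:
+ *       -- trap back to the same call; the process is already suspended.
+ *   }
+ */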
+
+
+Sint erts_flxctr_get_snapshot_result_after_trap(Eterm result_holder,
+ Uint counter_nr)
+{
+ Binary* bin = erts_magic_ref2bin(result_holder);
+    DecentralizedReadSnapshotInfo* data = ERTS_MAGIC_BIN_DATA(bin);
+ return data->result[counter_nr];
+}
+
+int erts_flxctr_is_snapshot_result(Eterm term)
+{
+ if (is_internal_magic_ref(term)) {
+ Binary* bin = erts_magic_ref2bin(term);
+ return ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_flxctr_read_ctx_bin_dtor;
+ } else return 0;
+}
+
+Sint erts_flxctr_read_approx(ErtsFlxCtr* c,
+ Uint counter_nr)
+{
+ if (c->is_decentralized) {
+ ErtsFlxCtrDecentralizedCtrArray* counter = ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c);
+ Sint sum = 0;
+ int sched;
+ for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
+ sum = sum + erts_atomic_read_nob(&counter->array[sched].counters[counter_nr]);
+ }
+ return sum;
+ } else {
+ return erts_flxctr_read_centralized(c, counter_nr);
+ }
+}
+
+int erts_flxctr_is_snapshot_ongoing(ErtsFlxCtr* c)
+{
+ return c->is_decentralized &&
+ (ERTS_FLXCTR_SNAPSHOT_NOT_ONGOING !=
+ erts_atomic_read_acqb(&ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c)->snapshot_status));
+}
+
+int erts_flxctr_suspend_until_thr_prg_if_snapshot_ongoing(ErtsFlxCtr* c, Process* p)
+{
+ if (erts_flxctr_is_snapshot_ongoing(c)) {
+ suspend_until_thr_prg(p);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+void erts_flxctr_reset(ErtsFlxCtr* c,
+ Uint counter_nr)
+{
+ if (c->is_decentralized) {
+ int sched;
+ ErtsFlxCtrDecentralizedCtrArray* counter =
+ ERTS_FLXCTR_GET_CTR_ARRAY_PTR(c);
+ for (sched = 0; sched < ERTS_FLXCTR_DECENTRALIZED_NO_SLOTS; sched++) {
+ erts_atomic_set_nob(&counter->array[sched].counters[counter_nr], 0);
+ }
+ } else {
+ erts_atomic_set_nob(&c->u.counters[counter_nr], 0);
+ }
+}
+
+
+void erts_flxctr_set_slot(int group) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ esdp->flxctr_slot_no = group;
+}
diff --git a/erts/emulator/beam/erl_flxctr.h b/erts/emulator/beam/erl_flxctr.h
new file mode 100644
index 0000000000..5cab02b9eb
--- /dev/null
+++ b/erts/emulator/beam/erl_flxctr.h
@@ -0,0 +1,406 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2019. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/**
+ * @file erl_flxctr.h
+ *
+ * @brief This file contains the API of a flexible counter. The
+ * counter can be configured during its initialization to be
+ * centralized or decentralized. The centralized configuration makes
+ * it possible to read the counter value extremely efficiently, but
+ * updates of the counter value can easily cause contention. The
+ * decentralized configuration has the reverse trade-off (i.e.,
+ * updates are efficient and scalable but reading the counter value is
+ * slow and may cause contention).
+ *
+ * @author Kjell Winblad
+ */
+
+#ifndef ERL_FLXCTR_H__
+#define ERL_FLXCTR_H__
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "bif.h"
+#include <stddef.h>
+
+/* Public Interface */
+
+#define ERTS_MAX_FLXCTR_GROUPS 256
+#define ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE (ERTS_CACHE_LINE_SIZE / sizeof(erts_atomic_t))
+
+typedef struct {
+ int nr_of_counters;
+ int is_decentralized;
+ union {
+ erts_atomic_t counters_ptr;
+ erts_atomic_t counters[1];
+ } u;
+} ErtsFlxCtr;
+
+#define ERTS_FLXCTR_NR_OF_EXTRA_BYTES(NR_OF_COUNTERS) \
+ ((NR_OF_COUNTERS-1) * sizeof(erts_atomic_t))
+
+/* Called by early_init */
+void erts_flxctr_setup(int decentralized_counter_groups);
+
+/**
+ * @brief Initializes an ErtsFlxCtr. The macro
+ * ERTS_FLXCTR_NR_OF_EXTRA_BYTES should be used to determine how much
+ * extra space needs to be allocated directly after the
+ * ErtsFlxCtr when is_decentralized is set to zero. Each ErtsFlxCtr
+ * instance may contain up to ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE
+ * counters. These counters are numbered from zero to
+ * (ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE-1). Most of the functions in
+ * this module take a parameter named counter_nr that controls which
+ * of the ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE counters in the given
+ * ErtsFlxCtr that should be operated on.
+ *
+ * @param c The counter to initialize
+ * @param is_decentralized Non-zero value to make c decentralized
+ * @param nr_of_counters The number of counters included in c
+ * (max ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE)
+ * @param alloc_type
+ */
+void erts_flxctr_init(ErtsFlxCtr* c,
+ int is_decentralized,
+ Uint nr_of_counters,
+ ErtsAlcType_t alloc_type);
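As a worked example of the sizing contract above — a minimal sketch, where the containing struct MyTable, the variable tb, and the use of the ERTS_ALC_T_DB_TABLE allocator type are illustrative assumptions, not something this patch prescribes:

    typedef struct {
        /* ... other table fields ... */
        ErtsFlxCtr counters;  /* keep last: in the centralized case the
                               * remaining counter slots are stored inline,
                               * directly after the ErtsFlxCtr */
    } MyTable;

    MyTable *tb = erts_alloc(ERTS_ALC_T_DB_TABLE,
                             sizeof(MyTable)
                             + ERTS_FLXCTR_NR_OF_EXTRA_BYTES(2));
    /* centralized (is_decentralized == 0), two counters */
    erts_flxctr_init(&tb->counters, 0, 2, ERTS_ALC_T_DB_TABLE);

Passing a nonzero is_decentralized instead makes updates scale; the extra bytes are then unused, since the counters live in a separate per-scheduler array.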
+
+/**
+ * @brief Destroys an initialized counter.
+ *
+ * @param c The counter that should be destroyed
+ * @param alloc_type The allocation type (needs to be the same as the
+ * one passed to erts_flxctr_init when c was
+ * initialized)
+ */
+void erts_flxctr_destroy(ErtsFlxCtr* c, ErtsAlcType_t alloc_type);
+
+/**
+ * @brief Adds to_add to counter number counter_nr in c
+ *
+ * @param c the ErtsFlxCtr to operate on
+ * @param counter_nr The number of the counter in c to modify
+ * @param to_add The amount that should be added to the specified counter
+ */
+ERTS_GLB_INLINE
+void erts_flxctr_add(ErtsFlxCtr* c,
+ Uint counter_nr,
+ int to_add);
+
+/**
+ * @brief Increases the specified counter by 1
+ *
+ * @param c The ErtsFlxCtr instance to operate on
+ * @param counter_nr The number of the counter within c to operate on
+ */
+ERTS_GLB_INLINE
+void erts_flxctr_inc(ErtsFlxCtr* c,
+ Uint counter_nr);
+
+/**
+ * @brief Decreases the specified counter by 1
+ */
+ERTS_GLB_INLINE
+void erts_flxctr_dec(ErtsFlxCtr* c,
+ Uint counter_nr);
+
+/**
+ * @brief This function tries to return the current value of the
+ * specified counter but may return an incorrect result if the counter
+ * is decentralized and other threads are accessing the counter
+ * concurrently.
+ *
+ * @param c The ErtsFlxCtr instance to operate on
+ * @param counter_nr The number of the counter within c to operate on
+ *
+ * @return A snapshot of the specified counter if c is centralized or a
+ * possibly incorrect estimate of the counter value if c is
+ * decentralized
+ */
+Sint erts_flxctr_read_approx(ErtsFlxCtr* c,
+ Uint counter_nr);
+
+/**
+ * @brief This function can only be used together with an ErtsFlxCtr
+ * that is configured to be centralized. The function increments the
+ * specified counter by 1 and returns the value of the counter after
+ * the increment.
+ */
+ERTS_GLB_INLINE
+Sint erts_flxctr_inc_read_centralized(ErtsFlxCtr* c,
+ Uint counter_nr);
+
+/**
+ * @brief This function can only be used together with an ErtsFlxCtr
+ * that is configured to be centralized. The function decrements the
+ * specified counter by 1 and returns the value of the counter after
+ * the operation.
+ */
+ERTS_GLB_INLINE
+Sint erts_flxctr_dec_read_centralized(ErtsFlxCtr* c,
+ Uint counter_nr);
+
+/**
+ * @brief This function can only be used together with an ErtsFlxCtr
+ * that is configured to be centralized. The function returns the
+ * current value of the specified counter.
+ */
+ERTS_GLB_INLINE
+Sint erts_flxctr_read_centralized(ErtsFlxCtr* c,
+ Uint counter_nr);
+
+
+typedef enum {
+ ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP,
+ ERTS_FLXCTR_DONE,
+ ERTS_FLXCTR_GET_RESULT_AFTER_TRAP
+} ErtsFlxctrSnapshotResultType;
+
+typedef struct {
+ ErtsFlxctrSnapshotResultType type;
+ Eterm trap_resume_state;
+ Sint result[ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE];
+} ErtsFlxCtrSnapshotResult;
+
+/**
+ * @brief This function initiates an atomic snapshot of an ErtsFlxCtr
+ * to read out the values of one or more of the counters that are
+ * stored in the given ErtsFlxCtr. The caller needs to perform
+ * different actions after the return of this function depending on
+ * the value of the type field in the returned struct:
+ *
+ * - The caller needs to trap and try again after the trap if the
+ * return value has the type ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP.
+ *
+ * - The caller can get the result directly from the result field of
+ * the returned struct if the return value has the type
+ * ERTS_FLXCTR_DONE. The value at index i in the result field
+ *   corresponds to counter number i.
+ *
+ * - Finally, if the return value has the type
+ * ERTS_FLXCTR_GET_RESULT_AFTER_TRAP, then the caller needs to save
+ * the value of the field trap_resume_state from the returned struct
+ * and trap. After the trap, the values of the counters can be
+ * obtained by using the function
+ * erts_flxctr_get_snapshot_result_after_trap. Note that the
+ * function erts_flxctr_is_snapshot_result can be used to check if a
+ * value is obtained from the trap_resume_state field in the
+ * returned struct (this can be useful when the calling function
+ * wakes up again after the trap).
+ *
+ * The snapshot operation initiated by this function should be
+ * considered ongoing from the call to this function until a struct
+ * with the type field set to ERTS_FLXCTR_DONE has been returned from
+ * the function, or until the caller has woken up after trapping. (A
+ * caller sketch follows this declaration.)
+ *
+ * @param c The ErtsFlxCtr that the snapshot shall be taken from
+ * @param alloc_type The allocation type (needs to be the same as the
+ * type passed to erts_flxctr_init when c was
+ * initialized)
+ * @param p The Erlang process that is doing the call
+ *
+ * @return See the description above
+ *
+ */
+ErtsFlxCtrSnapshotResult
+erts_flxctr_snapshot(ErtsFlxCtr* c,
+ ErtsAlcType_t alloc_type,
+ Process* p);
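A sketch of a caller driving all three outcomes. The BIF name my_count_bif_1, its export index BIF_my_count_bif_1, and the table tb from the earlier sizing sketch are hypothetical; only the erts_flxctr_* calls come from this header:

    BIF_RETTYPE my_count_bif_1(BIF_ALIST_1)
    {
        if (erts_flxctr_is_snapshot_result(BIF_ARG_1)) {
            /* woken up after an ERTS_FLXCTR_GET_RESULT_AFTER_TRAP trap */
            Sint v = erts_flxctr_get_snapshot_result_after_trap(BIF_ARG_1, 0);
            BIF_RET(make_small(v)); /* assuming the value fits in a small */
        } else {
            ErtsFlxCtrSnapshotResult res =
                erts_flxctr_snapshot(&tb->counters, ERTS_ALC_T_DB_TABLE, BIF_P);
            switch (res.type) {
            case ERTS_FLXCTR_DONE:
                BIF_RET(make_small(res.result[0]));
            case ERTS_FLXCTR_TRY_AGAIN_AFTER_TRAP:
                /* snapshot already ongoing; retry from the start */
                ERTS_BIF_YIELD1(bif_export[BIF_my_count_bif_1],
                                BIF_P, BIF_ARG_1);
            case ERTS_FLXCTR_GET_RESULT_AFTER_TRAP:
                /* trap, carrying the magic ref to the pending result */
                ERTS_BIF_YIELD1(bif_export[BIF_my_count_bif_1],
                                BIF_P, res.trap_resume_state);
            }
        }
    }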
+
+/**
+ * @brief Checks if the parameter term is a snapshot result (i.e.,
+ * something obtained from the trap_resume_state field of an
+ * ErtsFlxCtrSnapshotResult struct that has been returned from
+ * erts_flxctr_snapshot).
+ *
+ * @param term The term to check
+ *
+ * @return A nonzero value iff the term is a snapshot result
+ */
+int erts_flxctr_is_snapshot_result(Eterm term);
+
+/**
+ * @brief Returns the result of a snapshot for a counter given a
+ * snapshot result returned by a call to erts_flxctr_snapshot (i.e.,
+ * the value stored in the trap_resume_state field of a struct
+ * returned by erts_flxctr_snapshot). The caller needs to trap between
+ * the return of erts_flxctr_snapshot and the call to this function.
+ */
+Sint erts_flxctr_get_snapshot_result_after_trap(Eterm trap_resume_state,
+ Uint counter_nr);
+
+/**
+ * @brief Resets the specified counter to 0. This function is unsafe
+ * to call while a snapshot operation may be active (initiated with
+ * the erts_flxctr_snapshot function).
+ */
+void erts_flxctr_reset(ErtsFlxCtr* c,
+ Uint counter_nr);
+
+/**
+ * @brief Checks if a snapshot operation is active (snapshots are
+ * initiated with the erts_flxctr_snapshot function).
+ *
+ * @return nonzero value iff a snapshot was active at some point
+ * between the invocation and return of the function
+ */
+int erts_flxctr_is_snapshot_ongoing(ErtsFlxCtr* c);
+
+/**
+ * @brief This function checks if a snapshot operation is ongoing
+ * (snapshots are initiated with the erts_flxctr_snapshot function)
+ * and suspends the given process until thread progress has happened if
+ * it detects an ongoing snapshot operation. The caller needs to trap
+ * if a non-zero value is returned.
+ *
+ * @param c The ErtsFlxCtr to check
+ * @param p The calling process
+ *
+ * @return nonzero value if the given process has been suspended
+ */
+int erts_flxctr_suspend_until_thr_prg_if_snapshot_ongoing(ErtsFlxCtr* c, Process* p);
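A sketch of the intended guard pattern before a destructive operation such as freeing the structure that embeds the counter (BIF and table names are hypothetical, as in the sketches above):

    if (erts_flxctr_suspend_until_thr_prg_if_snapshot_ongoing(&tb->counters,
                                                              BIF_P)) {
        /* the process is now suspended; trap and redo the operation
         * once thread progress has been made */
        ERTS_BIF_YIELD1(bif_export[BIF_my_delete_bif_1], BIF_P, BIF_ARG_1);
    }
    /* no snapshot can still be reading the counter arrays here */
    erts_flxctr_destroy(&tb->counters, ERTS_ALC_T_DB_TABLE);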
+
+/* End: Public Interface */
+
+/* Internal Declarations */
+
+#define ERTS_FLXCTR_GET_CTR_ARRAY_PTR(C) \
+ ((ErtsFlxCtrDecentralizedCtrArray*) erts_atomic_read_acqb(&(C)->u.counters_ptr))
+#define ERTS_FLXCTR_GET_CTR_PTR(C, SCHEDULER_ID, COUNTER_ID) \
+ &(ERTS_FLXCTR_GET_CTR_ARRAY_PTR(C))->array[SCHEDULER_ID].counters[COUNTER_ID]
+
+
+typedef union {
+ erts_atomic_t counters[ERTS_FLXCTR_ATOMICS_PER_CACHE_LINE];
+ char pad[ERTS_CACHE_LINE_SIZE];
+} ErtsFlxCtrDecentralizedCtrArrayElem;
+
+typedef struct ErtsFlxCtrDecentralizedCtrArray {
+ void* block_start;
+ erts_atomic_t snapshot_status;
+ ErtsFlxCtrDecentralizedCtrArrayElem array[];
+} ErtsFlxCtrDecentralizedCtrArray;
+
+void erts_flxctr_set_slot(int group);
+
+ERTS_GLB_INLINE
+int erts_flxctr_get_slot_index(void);
+
+/* End: Internal Declarations */
+
+
+/* Implementation of inlined functions */
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+int erts_flxctr_get_slot_index(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+ ASSERT(esdp->flxctr_slot_no > 0);
+ return esdp->flxctr_slot_no;
+}
+
+ERTS_GLB_INLINE
+void erts_flxctr_add(ErtsFlxCtr* c,
+ Uint counter_nr,
+ int to_add)
+{
+ ASSERT(counter_nr < c->nr_of_counters);
+ if (c->is_decentralized) {
+ erts_atomic_add_nob(ERTS_FLXCTR_GET_CTR_PTR(c,
+ erts_flxctr_get_slot_index(),
+ counter_nr),
+ to_add);
+ } else {
+ erts_atomic_add_nob(&c->u.counters[counter_nr], to_add);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_flxctr_inc(ErtsFlxCtr* c,
+ Uint counter_nr)
+{
+ ASSERT(counter_nr < c->nr_of_counters);
+ if (c->is_decentralized) {
+ erts_atomic_inc_nob(ERTS_FLXCTR_GET_CTR_PTR(c,
+ erts_flxctr_get_slot_index(),
+ counter_nr));
+ } else {
+ erts_atomic_inc_read_nob(&c->u.counters[counter_nr]);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_flxctr_dec(ErtsFlxCtr* c,
+ Uint counter_nr)
+{
+ ASSERT(counter_nr < c->nr_of_counters);
+ if (c->is_decentralized) {
+ erts_atomic_dec_nob(ERTS_FLXCTR_GET_CTR_PTR(c,
+ erts_flxctr_get_slot_index(),
+ counter_nr));
+ } else {
+ erts_atomic_dec_nob(&c->u.counters[counter_nr]);
+ }
+}
+
+ERTS_GLB_INLINE
+Sint erts_flxctr_inc_read_centralized(ErtsFlxCtr* c,
+ Uint counter_nr)
+{
+ ASSERT(counter_nr < c->nr_of_counters);
+ ASSERT(!c->is_decentralized);
+ return erts_atomic_inc_read_nob(&c->u.counters[counter_nr]);
+}
+
+ERTS_GLB_INLINE
+Sint erts_flxctr_dec_read_centralized(ErtsFlxCtr* c,
+ Uint counter_nr)
+{
+ ASSERT(counter_nr < c->nr_of_counters);
+ ASSERT(!c->is_decentralized);
+ return erts_atomic_dec_read_nob(&c->u.counters[counter_nr]);
+}
+
+ERTS_GLB_INLINE
+Sint erts_flxctr_read_centralized(ErtsFlxCtr* c,
+ Uint counter_nr)
+{
+ ASSERT(counter_nr < c->nr_of_counters);
+ ASSERT(!c->is_decentralized);
+ return erts_atomic_read_nob(&((erts_atomic_t*)(c->u.counters))[counter_nr]);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_FLXCTR_H__ */
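For reference, the hot-path operations compose as in this sketch (continuing the hypothetical tb from the sketches above; using counter 0 for an object count and counter 1 for memory words is just an illustration):

    erts_flxctr_inc(&tb->counters, 0);      /* one more object      */
    erts_flxctr_add(&tb->counters, 1, 16);  /* 16 more words in use */
    erts_flxctr_dec(&tb->counters, 0);      /* one object removed   */

    /* cheap but possibly stale estimate; an exact value requires the
     * erts_flxctr_snapshot protocol described above */
    Sint approx_objects = erts_flxctr_read_approx(&tb->counters, 0);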
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 3a50b294d1..67a73e4d57 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -577,7 +577,7 @@ force_reschedule:
}
static ERTS_FORCE_INLINE Uint
-young_gen_usage(Process *p)
+young_gen_usage(Process *p, Uint *ext_msg_usage)
{
Uint hsz;
Eterm *aheap;
@@ -604,7 +604,10 @@ young_gen_usage(Process *p)
if (ERTS_SIG_IS_MSG(mp)
&& mp->data.attached
&& mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
- hsz += erts_msg_attached_data_size(mp);
+ Uint sz = erts_msg_attached_data_size(mp);
+ if (ERTS_SIG_IS_EXTERNAL_MSG(mp))
+ *ext_msg_usage += sz;
+ hsz += sz;
}
});
}
@@ -676,6 +679,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
{
Uint reclaimed_now = 0;
Uint ygen_usage;
+ Uint ext_msg_usage = 0;
Eterm gc_trace_end_tag;
int reds;
ErtsMonotonicTime start_time;
@@ -698,7 +702,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
return delay_garbage_collection(p, live_hf_end, need, fcalls);
}
- ygen_usage = max_young_gen_usage ? max_young_gen_usage : young_gen_usage(p);
+ ygen_usage = max_young_gen_usage ? max_young_gen_usage : young_gen_usage(p, &ext_msg_usage);
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
check_for_possibly_long_gc(p, ygen_usage);
@@ -739,7 +743,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
trace_gc(p, am_gc_minor_start, need, THE_NON_VALUE);
}
DTRACE2(gc_minor_start, pidbuf, need);
- reds = minor_collection(p, live_hf_end, need, objv, nobj,
+ reds = minor_collection(p, live_hf_end, need + ext_msg_usage, objv, nobj,
ygen_usage, &reclaimed_now);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
if (reds == -1) {
@@ -764,7 +768,7 @@ do_major_collection:
trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
}
DTRACE2(gc_major_start, pidbuf, need);
- reds = major_collection(p, live_hf_end, need, objv, nobj,
+ reds = major_collection(p, live_hf_end, need + ext_msg_usage, objv, nobj,
ygen_usage, &reclaimed_now);
if (ERTS_SCHEDULER_IS_DIRTY(esdp))
p->flags &= ~(F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
@@ -1101,6 +1105,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
Eterm* old_htop;
Uint n;
Uint ygen_usage = 0;
+ Uint ext_msg_usage = 0;
struct erl_off_heap_header** prev = NULL;
Sint64 reds;
int hibernated = !!(p->flags & F_HIBERNATED);
@@ -1118,7 +1123,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
p->flags &= ~F_DIRTY_CLA;
else {
Uint size = byte_lit_size/sizeof(Uint);
- ygen_usage = young_gen_usage(p);
+ ygen_usage = young_gen_usage(p, &ext_msg_usage);
if (hibernated)
size = size*2 + 3*ygen_usage;
else
@@ -1130,7 +1135,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
}
}
- reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
+ reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, ext_msg_usage,
p->arg_reg, p->arity, fcalls,
ygen_usage);
if (ERTS_PROC_IS_EXITING(p)) {
@@ -1303,7 +1308,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
ExternalThing *etp;
ASSERT(is_external_header(ptr->thing_word));
etp = (ExternalThing *) ptr;
- erts_refc_inc(&etp->node->refc, 1);
+ erts_ref_node_entry(etp->node, 1,
+ make_boxed(&oh->thing_word));
break;
}
}
@@ -1347,6 +1353,9 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
Eterm *mature = p->abandoned_heap ? p->abandoned_heap : p->heap;
Uint mature_size = p->high_water - mature;
Uint size_before = ygen_usage;
+#ifdef DEBUG
+ Uint debug_tmp = 0;
+#endif
/*
* Check if we have gone past the max heap size limit
@@ -1483,7 +1492,7 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
process from there */
ASSERT(!MAX_HEAP_SIZE_GET(p) ||
!(MAX_HEAP_SIZE_FLAGS_GET(p) & MAX_HEAP_SIZE_KILL) ||
- MAX_HEAP_SIZE_GET(p) > (young_gen_usage(p) +
+ MAX_HEAP_SIZE_GET(p) > (young_gen_usage(p, &debug_tmp) +
(OLD_HEND(p) - OLD_HEAP(p)) +
(HEAP_END(p) - HEAP_TOP(p))));
@@ -2836,7 +2845,11 @@ sweep_off_heap(Process *p, int fullsweep)
while (ptr) {
if (IS_MOVED_BOXED(ptr->thing_word)) {
ASSERT(!ErtsInArea(ptr, oheap, oheap_sz));
- *prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
+ if (is_external_header(((struct erl_off_heap_header*) boxed_val(ptr->thing_word))->thing_word))
+ erts_node_bookkeep(((ExternalThing*)ptr)->node,
+ make_boxed(&ptr->thing_word),
+ ERL_NODE_DEC);
+ *prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
switch (ptr->thing_word) {
case HEADER_PROC_BIN: {
@@ -2863,6 +2876,11 @@ sweep_off_heap(Process *p, int fullsweep)
/* fall through... */
}
default:
+ if (is_external_header(ptr->thing_word)) {
+ erts_node_bookkeep(((ExternalThing*)ptr)->node,
+ make_boxed(&ptr->thing_word),
+ ERL_NODE_INC);
+ }
prev = &ptr->next;
ptr = ptr->next;
}
@@ -2896,7 +2914,8 @@ sweep_off_heap(Process *p, int fullsweep)
}
default:
ASSERT(is_external_header(ptr->thing_word));
- erts_deref_node_entry(((ExternalThing*)ptr)->node);
+ erts_deref_node_entry(((ExternalThing*)ptr)->node,
+ make_boxed(&ptr->thing_word));
}
*prev = ptr = ptr->next;
}
@@ -3029,6 +3048,13 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
{
struct erl_off_heap_header* oh = (struct erl_off_heap_header*) hp;
+ if (is_external_header(oh->thing_word)) {
+ erts_node_bookkeep(((ExternalThing*)oh)->node,
+ make_boxed(((Eterm*)oh)-offs), ERL_NODE_DEC);
+ erts_node_bookkeep(((ExternalThing*)oh)->node,
+ make_boxed((Eterm*)oh), ERL_NODE_INC);
+ }
+
if (ErtsInArea(oh->next, area, area_size)) {
Eterm** uptr = (Eterm **) (void *) &oh->next;
*uptr += offs; /* Patch the mso chain */
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index 01d4aa54ff..68b9579433 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -226,6 +226,8 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
allctr->add_mbc = NULL;
allctr->remove_mbc = NULL;
allctr->largest_fblk_in_mbc = NULL;
+ allctr->first_fblk_in_mbc = NULL;
+ allctr->next_fblk_in_mbc = NULL;
allctr->init_atoms = init_atoms;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 75ad6de2c9..b0eb0e85c0 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -2263,9 +2263,10 @@ parse_bif_timer_options(Eterm option_list, int *async,
return 1;
}
-static void
-exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
+static int
+exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp, Sint reds)
{
+#define ERTS_BTM_CANCEL_REDS 80
ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
Uint32 sid, roflgs;
erts_aint_t state;
@@ -2290,7 +2291,7 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
if (sid != (Uint32) esdp->no) {
queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
- return;
+ return ERTS_BTM_CANCEL_REDS;
}
if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
@@ -2306,14 +2307,9 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
hl_timer_dec_refc(&tmr->type.hlt, roflgs);
else
tw_timer_dec_refc(&tmr->type.twt);
+ return ERTS_BTM_CANCEL_REDS;
}
-#ifdef ERTS_HLT_DEBUG
-# define ERTS_BTM_MAX_DESTROY_LIMIT 2
-#else
-# define ERTS_BTM_MAX_DESTROY_LIMIT 50
-#endif
-
typedef struct {
ErtsBifTimers *bif_timers;
union {
@@ -2321,10 +2317,9 @@ typedef struct {
} u;
} ErtsBifTimerYieldState;
-int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
+int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp, int reds)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(p);
-
ErtsBifTimerYieldState ys = {*btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2337,9 +2332,9 @@ int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
exit_cancel_bif_timer,
(void *) esdp,
&ysp->u.proc_btm_yield_state,
- ERTS_BTM_MAX_DESTROY_LIMIT);
+ reds);
- if (res == 0) {
+ if (res > 0) {
if (ysp != &ys)
erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
*vyspp = NULL;
@@ -2819,8 +2814,8 @@ btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
(Sint64) left);
}
-static void
-btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
+static int
+btm_tree_print(ErtsBifTimer *tmr, void *vbtmp, Sint reds)
{
int is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
ErtsMonotonicTime tpos;
@@ -2829,6 +2824,7 @@ btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
else
tpos = erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr);
btm_print(tmr, vbtmp, tpos, is_hlt);
+ return 1;
}
void
@@ -2860,8 +2856,8 @@ typedef struct {
void *arg;
} ErtsBTMForeachDebug;
-static void
-debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
+static int
+debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd, Sint reds)
{
if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
@@ -2870,6 +2866,7 @@ debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
: tmr->type.head.receiver.proc->common.id);
(*btmfd->func)(id, tmr->btm.message, tmr->btm.bp, btmfd->arg);
}
+ return 1;
}
void
@@ -2918,8 +2915,8 @@ debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct)
tmr->head.u.arg);
}
-static void
-debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct)
+static int
+debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct, Sint reds)
{
ErtsDebugForeachCallbackTimer *dfct
= (ErtsDebugForeachCallbackTimer *) vdfct;
@@ -2934,6 +2931,7 @@ debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct)
(*dfct->func)(dfct->arg,
tmr->timeout,
tmr->head.u.arg);
+ return 1;
}
static void
@@ -2981,7 +2979,8 @@ erts_debug_callback_timer_foreach(void (*tclbk)(void *),
if (srv->yield.root)
debug_callback_timer_foreach(srv->yield.root,
- (void *) &dfct);
+ (void *) &dfct,
+ -1);
time_rbt_foreach(srv->time_tree,
debug_callback_timer_foreach,
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
index e6f5e8b67d..29c873868b 100644
--- a/erts/emulator/beam/erl_hl_timer.h
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -56,7 +56,7 @@ void erts_cancel_proc_timer(Process *);
void erts_set_port_timer(Port *, Sint64);
void erts_cancel_port_timer(Port *);
Sint64 erts_read_port_timer(Port *);
-int erts_cancel_bif_timers(Process *, ErtsBifTimers **, void **);
+int erts_cancel_bif_timers(Process *, ErtsBifTimers **, void **, int);
int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **);
ErtsHLTimerService *erts_create_timer_service(void);
void erts_hl_timer_init(void);
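The timer-walk callbacks above now run under a reduction budget: each callback takes the remaining budget as a trailing Sint argument and returns the number of reductions it consumed, and the yielding walks report completion with a positive return value instead of zero. A minimal callback under the new contract could look like this inside erl_hl_timer.c (the name is hypothetical, and the budget handling is inferred from the call sites in this patch):

    static int my_btm_cb(ErtsBifTimer *tmr, void *arg, Sint reds)
    {
        /* inspect or cancel tmr here; the walk charges the returned
         * value against its budget and yields once it is spent */
        return 1; /* reductions consumed by this invocation */
    }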
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 163724ed3c..547e4064a2 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -128,7 +128,7 @@ const Eterm etp_hole_marker = 0;
static int modified_sched_thread_suggested_stack_size = 0;
-Eterm erts_init_process_id;
+Eterm erts_init_process_id = ERTS_INVALID_PID;
/*
* Note about VxWorks: All variables must be initialized by executable code,
@@ -163,7 +163,6 @@ int erts_initialized = 0;
* Configurable parameters.
*/
-Uint display_items; /* no of items to display in traces etc */
int H_MIN_SIZE; /* The minimum heap grain */
int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/
int H_MAX_SIZE; /* The maximum heap size */
@@ -354,6 +353,7 @@ erl_init(int ncpu,
erts_init_bif();
erts_init_bif_chksum();
erts_init_bif_binary();
+ erts_init_bif_guard();
erts_init_bif_persistent_term();
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
@@ -376,6 +376,7 @@ erl_init(int ncpu,
}
static Eterm
erl_spawn_system_process(Process* parent, Eterm mod, Eterm func, Eterm args,
ErlSpawnOpts *so)
{
@@ -397,16 +398,15 @@ erl_spawn_system_process(Process* parent, Eterm mod, Eterm func, Eterm args,
}
static Eterm
-erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
+erl_first_process_otp(char* mod_name, int argc, char** argv)
{
int i;
- Eterm start_mod;
Eterm args;
Eterm res;
Eterm* hp;
Process parent;
ErlSpawnOpts so;
- Eterm env;
+ Eterm boot_mod;
/*
* We need a dummy parent process to be able to call erl_create_process().
@@ -422,16 +422,14 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
args = CONS(hp, new_binary(&parent, (byte*)argv[i], len), args);
hp += 2;
}
- env = new_binary(&parent, code, size);
+ boot_mod = erts_atom_put((byte *) mod_name, sys_strlen(mod_name),
+ ERTS_ATOM_ENC_LATIN1, 1);
args = CONS(hp, args, NIL);
hp += 2;
- args = CONS(hp, env, args);
-
- start_mod = erts_atom_put((byte *) modname, sys_strlen(modname),
- ERTS_ATOM_ENC_LATIN1, 1);
+ args = CONS(hp, boot_mod, args);
so.flags = erts_default_spo_flags;
- res = erl_spawn_system_process(&parent, start_mod, am_start, args, &so);
+ res = erl_spawn_system_process(&parent, am_erl_init, am_start, args, &so);
ASSERT(is_internal_pid(res));
erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
@@ -527,7 +525,6 @@ erts_preloaded(Process* p)
/* static variables that must not change (use same values at restart) */
static char* program;
static char* init = "init";
-static char* boot = "boot";
static int boot_argc;
static char** boot_argv;
@@ -581,9 +578,6 @@ void erts_usage(void)
int this_rel = this_rel_num();
erts_fprintf(stderr, "Usage: %s [flags] [ -- [init_args] ]\n", progname(program));
erts_fprintf(stderr, "The flags are:\n\n");
-
- /* erts_fprintf(stderr, "-# number set the number of items to be used in traces etc\n"); */
-
erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n");
erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n",
ERTS_ASYNC_THREAD_MIN_STACK_SIZE,
@@ -591,18 +585,15 @@ void erts_usage(void)
erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n");
erts_fprintf(stderr, " valid range is [0-%d]\n",
ERTS_MAX_NO_OF_ASYNC_THREADS);
-
erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n");
erts_fprintf(stderr, " d (or no extra option) to disable the break\n");
erts_fprintf(stderr, " handler, i to ignore break signals\n");
-
- /* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */
-
erts_fprintf(stderr, "-c bool enable or disable time correction\n");
erts_fprintf(stderr, "-C mode set time warp mode; valid modes are:\n");
erts_fprintf(stderr, " no_time_warp|single_time_warp|multi_time_warp\n");
erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n");
erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n");
+ erts_fprintf(stderr, "-dcg set the limit for the number of decentralized counter groups\n");
erts_fprintf(stderr, "-fn[u|a|l] Control how filenames are interpreted\n");
erts_fprintf(stderr, "-hms size set minimum heap size in words (default %d)\n",
H_DEFAULT_SIZE);
@@ -616,7 +607,6 @@ void erts_usage(void)
erts_pd_initial_size);
erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n");
erts_fprintf(stderr, " valid values are: off_heap | on_heap\n");
-
erts_fprintf(stderr, "-IOp number set number of pollsets to be used to poll for I/O,\n");
erts_fprintf(stderr, " This value has to be equal or smaller than the\n");
erts_fprintf(stderr, " number of poll threads. If the current platform\n");
@@ -627,9 +617,7 @@ void erts_usage(void)
erts_fprintf(stderr, " number of poll threads.");
erts_fprintf(stderr, "-IOPt number set number of threads to be used to poll for I/O\n");
erts_fprintf(stderr, " as a percentage of the number of schedulers.");
-
- /* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
-
+ erts_fprintf(stderr, "-i module set the boot module (default init)\n");
erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
erts_fprintf(stderr, " Note that this flag is deprecated!\n");
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
@@ -644,7 +632,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-R number set compatibility release number,\n");
erts_fprintf(stderr, " valid range [%d-%d]\n",
this_rel-2, this_rel);
-
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
@@ -715,9 +702,7 @@ void erts_usage(void)
erts_fprintf(stderr, "-T number set modified timing level, valid range is [0-%d]\n",
ERTS_MODIFIED_TIMING_LEVELS-1);
erts_fprintf(stderr, "-V print Erlang version\n");
-
erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n");
-
erts_fprintf(stderr, "-W<i|w|e> set error logger warnings mapping,\n");
erts_fprintf(stderr, " see error_logger documentation for details\n");
erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n");
@@ -729,9 +714,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-zebwt val set ets busy wait threshold, valid values are:\n");
erts_fprintf(stderr, " none|very_short|short|medium|long|very_long|extremely_long\n");
#endif
- erts_fprintf(stderr, "-ztma bool enable/disable tuple module apply support in emulator\n");
- erts_fprintf(stderr, " (transitional flag for parameterized modules; recompile\n");
- erts_fprintf(stderr, " with +tuple_calls for compatibility with future versions)\n");
erts_fprintf(stderr, "\n");
erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n");
erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n");
@@ -804,6 +786,8 @@ early_init(int *argc, char **argv) /*
int dirty_io_scheds;
int max_reader_groups;
int reader_groups;
+ int max_decentralized_counter_groups;
+ int decentralized_counter_groups;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
@@ -811,7 +795,6 @@ early_init(int *argc, char **argv) /*
erts_sched_compact_load = 1;
erts_printf_eterm_func = erts_printf_term;
- display_items = 200;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS;
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
@@ -824,7 +807,8 @@ early_init(int *argc, char **argv) /*
erts_initialized = 0;
- erts_pre_early_init_cpu_topology(&max_reader_groups,
+ erts_pre_early_init_cpu_topology(&max_decentralized_counter_groups,
+ &max_reader_groups,
&ncpu,
&ncpuonln,
&ncpuavail);
@@ -885,6 +869,24 @@ early_init(int *argc, char **argv) /*
}
if (argv[i][0] == '-') {
switch (argv[i][1]) {
+ case 'd': {
+ char *sub_param = argv[i]+2;
+ if (has_prefix("cg", sub_param)) {
+ char *arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (sscanf(arg, "%d", &max_decentralized_counter_groups) != 1) {
+ erts_fprintf(stderr,
+ "bad decentralized counter groups limit: %s\n", arg);
+ erts_usage();
+ }
+ if (max_decentralized_counter_groups < 0) {
+ erts_fprintf(stderr,
+ "bad decentralized counter groups limit: %d\n",
+ max_decentralized_counter_groups);
+ erts_usage();
+ }
+ }
+ break;
+ }
case 'r': {
char *sub_param = argv[i]+2;
if (has_prefix("g", sub_param)) {
@@ -1206,8 +1208,10 @@ early_init(int *argc, char **argv) /*
erts_early_init_cpu_topology(no_schedulers,
&max_main_threads,
max_reader_groups,
- &reader_groups);
-
+ &reader_groups,
+ max_decentralized_counter_groups,
+ &decentralized_counter_groups);
+ erts_flxctr_setup(decentralized_counter_groups);
{
erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
elid.mem.std.alloc = ethr_std_alloc;
@@ -1318,25 +1322,9 @@ erl_start(int argc, char **argv)
/*
* NOTE: -M flags are handled (and removed from argv) by
- * erts_alloc_init().
- *
- * The -d, -m, -S, -t, and -T flags was removed in
- * Erlang 5.3/OTP R9C.
- *
- * -S, and -T has been reused in Erlang 5.5/OTP R11B.
- *
- * -d has been reused in a patch R12B-4.
+ * erts_alloc_init().
*/
- case '#' :
- arg = get_arg(argv[i]+2, argv[i+1], &i);
- if ((display_items = atoi(arg)) == 0) {
- erts_fprintf(stderr, "bad display items%s\n", arg);
- erts_usage();
- }
- VERBOSE(DEBUG_SYSTEM,
- ("using display items %d\n",display_items));
- break;
case 'p':
if (!sys_strncmp(argv[i],"-pc",3)) {
int printable_chars = ERL_PRINTABLE_CHARACTERS_LATIN1;
@@ -1615,11 +1603,6 @@ erl_start(int argc, char **argv)
init = get_arg(argv[i]+2, argv[i+1], &i);
break;
- case 'b':
- /* define name of initial function */
- boot = get_arg(argv[i]+2, argv[i+1], &i);
- break;
-
case 'B':
if (argv[i][2] == 'i') /* +Bi */
ignore_break = 1;
@@ -2215,17 +2198,6 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (has_prefix("tma", sub_param)) {
- arg = get_arg(sub_param+3, argv[i+1], &i);
- if (sys_strcmp(arg,"true") == 0) {
- tuple_module_apply = 1;
- } else if (sys_strcmp(arg,"false") == 0) {
- tuple_module_apply = 0;
- } else {
- erts_fprintf(stderr, "bad tuple module apply %s\n", arg);
- erts_usage();
- }
- }
else {
erts_fprintf(stderr, "bad -z option %s\n", argv[i]);
erts_usage();
@@ -2316,8 +2288,8 @@ erl_start(int argc, char **argv)
erts_initialized = 1;
- erts_init_process_id = erl_first_process_otp("otp_ring0", NULL, 0,
- boot_argc, boot_argv);
+ erts_init_process_id = erl_first_process_otp(init, boot_argc, boot_argv);
+ ASSERT(erts_init_process_id != ERTS_INVALID_PID);
{
/*
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 1416c5f96c..3aab4828cc 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -42,6 +42,7 @@
#include "erl_term.h"
#include "erl_threads.h"
#include "erl_atom_table.h"
+#include "erl_utils.h"
typedef struct {
char *name;
@@ -91,6 +92,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "db_tab", "address" },
{ "db_tab_fix", "address" },
{ "db_hash_slot", "address" },
+ { "erl_db_catree_base_node", NULL },
+ { "erl_db_catree_route_node", "index" },
{ "resource_monitors", "address" },
{ "driver_list", NULL },
{ "proc_msgq", "pid" },
@@ -193,6 +196,12 @@ struct lc_locked_lock_t_ {
unsigned int line;
erts_lock_flags_t flags;
erts_lock_options_t taken_options;
+ /*
+ * Pointer back to the lock instance if it exists or NULL for proc locks.
+ * If set, we use it to allow trylock of another lock instance
+ * that has identical lock order to an already locked lock.
+ */
+ erts_lc_lock_t *lck;
};
typedef struct {
@@ -406,6 +415,10 @@ new_locked_lock(lc_thread_t* thr,
ll->line = line;
ll->flags = lck->flags;
ll->taken_options = options;
+ if ((lck->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK)
+ ll->lck = NULL;
+ else
+ ll->lck = lck;
return ll;
}
@@ -709,6 +722,14 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
+static int
+lc_is_term_order(Sint16 id)
+{
+ return erts_lock_order[id].internal_order != NULL
+ && sys_strcmp(erts_lock_order[id].internal_order, "term") == 0;
+}
+
+
static int compare_locked_by_id(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
if(locked_lock->id < comparand->id) {
@@ -720,18 +741,23 @@ static int compare_locked_by_id(lc_locked_lock_t *locked_lock, erts_lc_lock_t *c
return 0;
}
-static int compare_locked_by_id_extra(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+static int compare_locked_by_id_extra(lc_locked_lock_t *ll, erts_lc_lock_t *comparand)
{
- int order = compare_locked_by_id(locked_lock, comparand);
+ int order = compare_locked_by_id(ll, comparand);
if(order) {
return order;
- } else if(locked_lock->extra < comparand->extra) {
+ }
+ if (ll->flags & ERTS_LOCK_FLAGS_PROPERTY_TERM_ORDER) {
+ ASSERT(!is_header(ll->extra) && !is_header(comparand->extra));
+ return CMP(ll->extra, comparand->extra);
+ }
+
+ if(ll->extra < comparand->extra) {
return -1;
- } else if(locked_lock->extra > comparand->extra) {
+ } else if(ll->extra > comparand->extra) {
return 1;
}
-
return 0;
}
@@ -970,7 +996,8 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
return 0;
}
else {
- lc_locked_lock_t *tl_lck;
+ lc_locked_lock_t *ll;
+ int order;
ASSERT(thr->locked.last);
@@ -979,25 +1006,25 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
type_order_violation("trylocking ", thr, lck);
#endif
- if (thr->locked.last->id < lck->id
- || (thr->locked.last->id == lck->id
- && thr->locked.last->extra < lck->extra))
- return 0;
+ ll = thr->locked.last;
+ order = compare_locked_by_id_extra(ll, lck);
+
+ if (order < 0)
+ return 0;
/*
- * Lock order violation
+ * TryLock order violation
*/
-
- /* Check that we are not trying to lock this lock twice */
- for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
- if (tl_lck->id < lck->id
- || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
- if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", thr, lck, options);
- break;
- }
- }
+ /* Check that we are not trying to lock this lock twice */
+ do {
+ if (order == 0 && (ll->lck == lck || !ll->lck))
+ lock_twice("Trylocking", thr, lck, options);
+ ll = ll->prev;
+ if (!ll)
+ break;
+ order = compare_locked_by_id_extra(ll, lck);
+ } while (order >= 0);
#ifndef ERTS_LC_ALLWAYS_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
/* We only force busy if a lock order violation would occur
@@ -1044,10 +1071,10 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t
#endif
for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
- if (tl_lck->id < lck->id
- || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
- if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", thr, lck, options);
+ int order = compare_locked_by_id_extra(tl_lck, lck);
+ if (order <= 0) {
+ if (order == 0 && (tl_lck->lck == lck || !tl_lck->lck))
+ lock_twice("Trylocking", thr, lck, options);
if (locked) {
ll->next = tl_lck->next;
ll->prev = tl_lck;
@@ -1089,10 +1116,10 @@ void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
for (l_lck2 = thr->required.last;
l_lck2;
l_lck2 = l_lck2->prev) {
- if (l_lck2->id < lck->id
- || (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
+ int order = compare_locked_by_id_extra(l_lck2, lck);
+ if (order < 0)
break;
- else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
+ if (order == 0)
require_twice(thr, lck);
}
if (!l_lck2) {
@@ -1150,6 +1177,7 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
{
lc_thread_t *thr;
lc_locked_lock_t *new_ll;
+ int order;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1165,10 +1193,10 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
thr->locked.last = thr->locked.first = new_ll;
ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE);
thr->matrix.m[lck->id][0] = 1;
+ return;
}
- else if (thr->locked.last->id < lck->id
- || (thr->locked.last->id == lck->id
- && thr->locked.last->extra < lck->extra)) {
+ order = compare_locked_by_id_extra(thr->locked.last, lck);
+ if (order < 0) {
lc_locked_lock_t* ll;
if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags)) {
type_order_violation("locking ", thr, lck);
@@ -1186,7 +1214,7 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
thr->locked.last->next = new_ll;
thr->locked.last = new_ll;
}
- else if (thr->locked.last->id == lck->id && thr->locked.last->extra == lck->extra)
+ else if (order == 0)
lock_twice("Locking", thr, lck, options);
else
lock_order_violation(thr, lck);
@@ -1298,7 +1326,6 @@ void
erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags)
{
lck->id = erts_lc_get_lock_order_id(name);
-
lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
@@ -1311,8 +1338,13 @@ erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Et
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = extra;
- ASSERT(is_immed(lck->extra));
lck->flags = flags;
+ if (lc_is_term_order(lck->id)) {
+ lck->flags |= ERTS_LOCK_FLAGS_PROPERTY_TERM_ORDER;
+ ASSERT(!is_header(lck->extra));
+ }
+ else
+ ASSERT(is_immed(lck->extra));
lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index d10e32985a..b32f27d9f9 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -104,7 +104,7 @@ Eterm erts_lc_dump_graph(void);
#define erts_lc_lock(lck) erts_lc_lock_x(lck,__FILE__,__LINE__)
#define erts_lc_trylock(res,lck) erts_lc_trylock_x(res,lck,__FILE__,__LINE__)
-#define erts_lc_lock_flg(lck) erts_lc_lock_flg_x(lck,__FILE__,__LINE__)
-#define erts_lc_trylock_flg(res,lck) erts_lc_trylock_flg_x(res,lck,__FILE__,__LINE__)
+#define erts_lc_lock_flg(lck,flg) erts_lc_lock_flg_x(lck,flg,__FILE__,__LINE__)
+#define erts_lc_trylock_flg(res,lck,flg) erts_lc_trylock_flg_x(res,lck,flg,__FILE__,__LINE__)
#endif /* #ifndef ERTS_LOCK_CHECK_H__ */
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 89d95a73cf..0d47b16e0b 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -532,7 +532,7 @@ ERTS_GLB_INLINE
void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {
ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state);
- /* We can not assume that state is >= -1 here; unlock and unacquire might
+ /* We cannot assume that state is >= -1 here; unlock and unacquire might
* bring it below -1 and race to increment it back. */
if(state < 0) {
diff --git a/erts/emulator/beam/erl_lock_flags.h b/erts/emulator/beam/erl_lock_flags.h
index d711f69456..2db133b598 100644
--- a/erts/emulator/beam/erl_lock_flags.h
+++ b/erts/emulator/beam/erl_lock_flags.h
@@ -28,15 +28,17 @@
/* Property/category are bitfields to simplify their use in masks. */
#define ERTS_LOCK_FLAGS_MASK_CATEGORY (0xFFC0)
-#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0030)
+#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0038)
/* Type is a plain number. */
-#define ERTS_LOCK_FLAGS_MASK_TYPE (0x000F)
+#define ERTS_LOCK_FLAGS_MASK_TYPE (0x0007)
#define ERTS_LOCK_FLAGS_TYPE_SPINLOCK (1)
#define ERTS_LOCK_FLAGS_TYPE_MUTEX (2)
#define ERTS_LOCK_FLAGS_TYPE_PROCLOCK (3)
+/* The lock checker uses real term order instead of raw word compare */
+#define ERTS_LOCK_FLAGS_PROPERTY_TERM_ORDER (1 << 3)
/* "Static" guarantees that the lock will never be destroyed once created. */
#define ERTS_LOCK_FLAGS_PROPERTY_STATIC (1 << 4)
#define ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE (1 << 5)
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index 8f96dc3d23..62dd85e425 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -125,15 +125,20 @@ BIF_RETTYPE map_size_1(BIF_ALIST_1) {
flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
BIF_RET(make_small(flatmap_get_size(mp)));
} else if (is_hashmap(BIF_ARG_1)) {
- Eterm *head, *hp, res;
- Uint size, hsz=0;
+ Eterm *head;
+ Uint size;
head = hashmap_val(BIF_ARG_1);
size = head[1];
- (void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(BIF_P, hsz);
- res = erts_bld_uint(&hp, NULL, size);
- BIF_RET(res);
+
+ /*
+ * As long as a small has 28 bits (on a 32-bit machine) for
+ * the integer itself, it is impossible to build a map whose
+ * size would not fit in a small. Add an assertion in case we
+ * ever decrease the number of bits in a small.
+ */
+ ASSERT(IS_USMALL(0, size));
+ BIF_RET(make_small(size));
}
BIF_P->fvalue = BIF_ARG_1;
@@ -1505,25 +1510,6 @@ int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp)
return ap ? -1 : 1;
}
-/* maps:new/0 */
-
-BIF_RETTYPE maps_new_0(BIF_ALIST_0) {
- Eterm* hp;
- Eterm tup;
- flatmap_t *mp;
-
- hp = HAlloc(BIF_P, (MAP_HEADER_FLATMAP_SZ + 1));
- tup = make_tuple(hp);
- *hp++ = make_arityval(0);
-
- mp = (flatmap_t*)hp;
- mp->thing_word = MAP_HEADER_FLATMAP;
- mp->size = 0;
- mp->keys = tup;
-
- BIF_RET(make_flatmap(mp));
-}
-
/* maps:put/3 */
BIF_RETTYPE maps_put_3(BIF_ALIST_3) {
@@ -1707,11 +1693,16 @@ int erts_maps_update(Process *p, Eterm key, Eterm value, Eterm map, Eterm *res)
return 0;
found_key:
- *hp++ = value;
- vs++;
- if (++i < n)
- sys_memcpy(hp, vs, (n - i)*sizeof(Eterm));
- *res = make_flatmap(shp);
+ if(*vs == value) {
+ HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp);
+ *res = map;
+ } else {
+ *hp++ = value;
+ vs++;
+ if (++i < n)
+ sys_memcpy(hp, vs, (n - i)*sizeof(Eterm));
+ *res = make_flatmap(shp);
+ }
return 1;
}
@@ -1767,9 +1758,7 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
if (is_immed(key)) {
for( i = 0; i < n; i ++) {
if (ks[i] == key) {
- *hp++ = value;
- vs++;
- c = 1;
+ goto found_key;
} else {
*hp++ = *vs++;
}
@@ -1777,18 +1766,13 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
} else {
for( i = 0; i < n; i ++) {
if (EQ(ks[i], key)) {
- *hp++ = value;
- vs++;
- c = 1;
+ goto found_key;
} else {
*hp++ = *vs++;
}
}
}
- if (c)
- return res;
-
/* the map will grow */
if (n >= MAP_SMALL_MAP_LIMIT) {
@@ -1843,6 +1827,18 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) {
*/
*shp = make_pos_bignum_header(0);
return res;
+
+found_key:
+ if(*vs == value) {
+ HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp);
+ return map;
+ } else {
+ *hp++ = value;
+ vs++;
+ if (++i < n)
+ sys_memcpy(hp, vs, (n - i)*sizeof(Eterm));
+ return res;
+ }
}
ASSERT(is_hashmap(map));
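Note that both found_key paths above compare with word equality (*vs == value), so the original map is reused only when the new value is bitwise-identical to the stored one, i.e. the same immediate or the same boxed pointer; equal terms living at different heap addresses still produce an updated copy.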
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index a3274d7443..6645341512 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -181,7 +181,7 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
- erts_deref_node_entry(u.ext->node);
+ erts_deref_node_entry(u.ext->node, make_boxed(u.ep));
break;
}
}
@@ -201,34 +201,44 @@ free_message_buffer(ErlHeapFragment* bp)
}while (bp != NULL);
}
+static void
+erts_cleanup_message(ErtsMessage *mp)
+{
+ ErlHeapFragment *bp;
+ if (ERTS_SIG_IS_EXTERNAL_MSG(mp) || ERTS_SIG_IS_NON_MSG(mp)) {
+ ErtsDistExternal *edep = erts_proc_sig_get_external(mp);
+ if (edep) {
+ erts_free_dist_ext_copy(edep);
+ if (mp->data.heap_frag == &mp->hfrag) {
+ ASSERT(ERTS_SIG_IS_EXTERNAL_MSG(mp));
+ mp->data.heap_frag = ERTS_MSG_COMBINED_HFRAG;
+ }
+ }
+ }
+
+ if (ERTS_SIG_IS_MSG(mp) && mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
+ bp = mp->data.heap_frag;
+ } else {
+ /* All non-msg signals are combined HFRAG messages,
+ but we overwrite the mp->data field with the
+ nm_signal queue ptr, so we have to fix that here
+ before freeing it. */
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ bp = mp->hfrag.next;
+ erts_cleanup_offheap(&mp->hfrag.off_heap);
+ }
+
+ if (bp)
+ free_message_buffer(bp);
+}
+
void
erts_cleanup_messages(ErtsMessage *msgp)
{
ErtsMessage *mp = msgp;
while (mp) {
ErtsMessage *fmp;
- ErlHeapFragment *bp;
- if (ERTS_SIG_IS_EXTERNAL_MSG(mp)) {
- if (is_not_immed(ERL_MESSAGE_TOKEN(mp))) {
- bp = (ErlHeapFragment *) mp->data.dist_ext->ext_endp;
- erts_cleanup_offheap(&bp->off_heap);
- }
- if (mp->data.dist_ext)
- erts_free_dist_ext_copy(mp->data.dist_ext);
- }
- else {
- if (ERTS_SIG_IS_INTERNAL_MSG(mp)
- && mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
- bp = mp->data.heap_frag;
- }
- else {
- mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
- bp = mp->hfrag.next;
- erts_cleanup_offheap(&mp->hfrag.off_heap);
- }
- if (bp)
- free_message_buffer(bp);
- }
+ erts_cleanup_message(mp);
fmp = mp;
mp = mp->next;
erts_free_message(fmp);
@@ -260,6 +270,7 @@ void
erts_queue_dist_message(Process *rcvr,
ErtsProcLocks rcvr_locks,
ErtsDistExternal *dist_ext,
+ ErlHeapFragment *hfrag,
Eterm token,
Eterm from)
{
@@ -268,8 +279,26 @@ erts_queue_dist_message(Process *rcvr,
ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
- mp = erts_alloc_message(0, NULL);
- mp->data.dist_ext = dist_ext;
+ if (hfrag) {
+ /* Fragmented message, allocate a message reference */
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = hfrag;
+ } else {
+ /* Un-fragmented message, allocate space for
+ token and dist_ext in message. */
+ Uint dist_ext_sz = erts_dist_ext_size(dist_ext) / sizeof(Eterm);
+ Uint token_sz = size_object(token);
+ Uint sz = token_sz + dist_ext_sz;
+ Eterm *hp;
+
+ mp = erts_alloc_message(sz, &hp);
+ mp->data.heap_frag = &mp->hfrag;
+ mp->hfrag.used_size = token_sz;
+
+ erts_make_dist_ext_copy(dist_ext, erts_get_dist_ext(mp->data.heap_frag));
+
+ token = copy_struct(token, token_sz, &hp, &mp->data.heap_frag->off_heap);
+ }
ERL_MESSAGE_FROM(mp) = dist_ext->dep->sysname;
ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
@@ -493,25 +522,27 @@ Uint
erts_msg_attached_data_size_aux(ErtsMessage *msg)
{
Sint sz;
- ASSERT(is_non_value(ERL_MESSAGE_TERM(msg)));
- ASSERT(msg->data.dist_ext);
- ASSERT(msg->data.dist_ext->heap_size < 0);
-
- sz = erts_decode_dist_ext_size(msg->data.dist_ext);
- if (sz < 0) {
- /* Bad external
- * We leave the message intact in this case as it's not worth the trouble
- * to make all callers remove it from queue. It will be detected again
- * and removed from message queue later anyway.
- */
- return 0;
- }
+ ErtsDistExternal *edep = erts_get_dist_ext(msg->data.heap_frag);
+ ASSERT(ERTS_SIG_IS_EXTERNAL_MSG(msg));
- msg->data.dist_ext->heap_size = sz;
- if (is_not_nil(msg->m[1])) {
- ErlHeapFragment *heap_frag;
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- sz += heap_frag->used_size;
+ if (edep->heap_size < 0) {
+
+ sz = erts_decode_dist_ext_size(edep, 1);
+ if (sz < 0) {
+ /* Bad external
+ * We leave the message intact in this case as it's not worth the trouble
+ * to make all callers remove it from queue. It will be detected again
+ * and removed from message queue later anyway.
+ */
+ return 0;
+ }
+
+ edep->heap_size = sz;
+ } else {
+ sz = edep->heap_size;
+ }
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
+ sz += msg->data.heap_frag->used_size;
}
return sz;
}
@@ -532,9 +563,7 @@ erts_try_alloc_message_on_heap(Process *pp,
if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
goto in_message_fragment;
- else if (
- *plp & ERTS_PROC_LOCK_MAIN
- ) {
+ else if (*plp & ERTS_PROC_LOCK_MAIN) {
try_on_heap:
if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
|| (pp->flags & F_DISABLE_GC)
@@ -747,6 +776,13 @@ erts_send_message(Process* sender,
#endif
erts_queue_proc_message(sender, receiver, *receiver_locks, mp, message);
+
+ if (msize > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
+ Uint reds = msize / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
+ if (reds > CONTEXT_REDS)
+ reds = CONTEXT_REDS;
+ BUMP_REDS(sender, (int) reds);
+ }
}
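As a worked example of the bump above: with the non-debug setting of 64 copied words per reduction, sending a 640,000-word message charges min(640000/64, CONTEXT_REDS) = min(10000, 4000) = 4000 reductions to the sender, while messages of 64 words or fewer cost nothing extra.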
@@ -1101,84 +1137,28 @@ change_to_off_heap:
return res;
}
-int
-erts_decode_dist_message(Process *proc, ErtsProcLocks proc_locks,
- ErtsMessage *msgp, int force_off_heap)
+void erts_factory_proc_init(ErtsHeapFactory* factory, Process* p)
{
- ErtsHeapFactory factory;
- Eterm msg;
- ErlHeapFragment *bp;
- Sint need;
- int decode_in_heap_frag;
-
- decode_in_heap_frag = (force_off_heap
- || !(proc_locks & ERTS_PROC_LOCK_MAIN)
- || (proc->flags & F_OFF_HEAP_MSGQ));
-
- if (msgp->data.dist_ext->heap_size >= 0)
- need = msgp->data.dist_ext->heap_size;
- else {
- need = erts_decode_dist_ext_size(msgp->data.dist_ext);
- if (need < 0) {
- /* bad msg; remove it... */
- if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
- bp = erts_dist_ext_trailer(msgp->data.dist_ext);
- erts_cleanup_offheap(&bp->off_heap);
- }
- erts_free_dist_ext_copy(msgp->data.dist_ext);
- msgp->data.dist_ext = NULL;
- return 0;
- }
-
- msgp->data.dist_ext->heap_size = need;
- }
-
- if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
- bp = erts_dist_ext_trailer(msgp->data.dist_ext);
- need += bp->used_size;
- }
-
- if (decode_in_heap_frag)
- erts_factory_heap_frag_init(&factory, new_message_buffer(need));
- else
- erts_factory_proc_prealloc_init(&factory, proc, need);
-
- ASSERT(msgp->data.dist_ext->heap_size >= 0);
- if (is_not_immed(ERL_MESSAGE_TOKEN(msgp))) {
- ErlHeapFragment *heap_frag;
- heap_frag = erts_dist_ext_trailer(msgp->data.dist_ext);
- ERL_MESSAGE_TOKEN(msgp) = copy_struct(ERL_MESSAGE_TOKEN(msgp),
- heap_frag->used_size,
- &factory.hp,
- factory.off_heap);
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
-
- msg = erts_decode_dist_ext(&factory, msgp->data.dist_ext);
- ERL_MESSAGE_TERM(msgp) = msg;
- erts_free_dist_ext_copy(msgp->data.dist_ext);
- msgp->data.attached = NULL;
-
- if (is_non_value(msg)) {
- erts_factory_undo(&factory);
- return 0;
- }
-
- erts_factory_trim_and_close(&factory, msgp->m,
- ERL_MESSAGE_REF_ARRAY_SZ);
-
- ASSERT(!msgp->data.heap_frag);
-
- if (decode_in_heap_frag)
- msgp->data.heap_frag = factory.heap_frags;
-
- return 1;
-}
+ /* This function does not use HAlloc to allocate on the heap
+ as we do not want to use INIT_HEAP_MEM on the allocated
+ heap, as that completely destroys the DEBUG emulator's
+ performance. */
+ ErlHeapFragment *bp = p->mbuf;
+ factory->mode = FACTORY_HALLOC;
+ factory->p = p;
+ factory->hp_start = HEAP_TOP(p);
+ factory->hp = factory->hp_start;
+ factory->hp_end = HEAP_LIMIT(p);
+ factory->off_heap = &p->off_heap;
+ factory->message = NULL;
+ factory->off_heap_saved.first = p->off_heap.first;
+ factory->off_heap_saved.overhead = p->off_heap.overhead;
+ factory->heap_frags_saved = bp;
+ factory->heap_frags_saved_used = bp ? bp->used_size : 0;
+ factory->heap_frags = NULL; /* not used */
+ factory->alloc_type = 0; /* not used */
-void erts_factory_proc_init(ErtsHeapFactory* factory,
- Process* p)
-{
- erts_factory_proc_prealloc_init(factory, p, HEAP_LIMIT(p) - HEAP_TOP(p));
+ HEAP_TOP(p) = HEAP_LIMIT(p);
}
void erts_factory_proc_prealloc_init(ErtsHeapFactory* factory,
@@ -1235,7 +1215,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
int on_heap;
erts_aint32_t state;
- state = proc ? erts_atomic32_read_nob(&proc->state) : 0;
+ state = proc ? erts_atomic32_read_nob(&proc->state) : ERTS_PSFLG_OFF_HEAP_MSGQ;
if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
msgp = erts_alloc_message(sz, &hp);
@@ -1468,8 +1448,8 @@ void erts_factory_close(ErtsHeapFactory* factory)
else
factory->message->data.heap_frag = factory->heap_frags;
- /* Fall through */
- case FACTORY_HEAP_FRAGS:
+ /* Fall through */
+ case FACTORY_HEAP_FRAGS:
bp = factory->heap_frags;
}
@@ -1610,6 +1590,9 @@ void erts_factory_undo(ErtsHeapFactory* factory)
factory->message->hfrag.next = factory->heap_frags;
else
factory->message->data.heap_frag = factory->heap_frags;
+ /* Set the message to NIL so that this message is not
+ treated as a distributed message by the cleanup_messages logic */
+ factory->message->m[0] = NIL;
erts_cleanup_messages(factory->message);
break;
case FACTORY_TMP:
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index b2550814fd..5bd25737a7 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -22,10 +22,17 @@
#define __ERL_MESSAGE_H__
#include "sys.h"
+#include "erl_vm.h"
#define ERTS_PROC_SIG_QUEUE_TYPE_ONLY
#include "erl_proc_sig_queue.h"
#undef ERTS_PROC_SIG_QUEUE_TYPE_ONLY
+#ifdef DEBUG
+#define ERTS_MSG_COPY_WORDS_PER_REDUCTION 4
+#else
+#define ERTS_MSG_COPY_WORDS_PER_REDUCTION 64
+#endif
+
struct proc_bin;
struct external_thing_;
@@ -111,6 +118,7 @@ erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
}
res = factory->hp;
factory->hp += need;
+ INIT_HEAP_MEM(res, need);
return res;
}
@@ -138,7 +146,7 @@ typedef struct erl_heap_fragment ErlHeapFragment;
struct erl_heap_fragment {
ErlHeapFragment* next; /* Next heap fragment */
ErlOffHeap off_heap; /* Offset heap data. */
- Uint alloc_size; /* Size in (half)words of mem */
+ Uint alloc_size; /* Size in words of mem */
Uint used_size; /* With terms to be moved to heap by GC */
Eterm mem[1]; /* Data */
};
@@ -167,7 +175,6 @@ struct erl_heap_fragment {
#define ERL_MESSAGE_REF_FIELDS__ \
ErtsMessage *next; /* Next message */ \
union { \
- ErtsDistExternal *dist_ext; \
ErlHeapFragment *heap_frag; \
void *attached; \
} data; \
@@ -438,7 +445,8 @@ ErlHeapFragment* new_message_buffer(Uint);
ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
-void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm);
+void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *,
+ ErlHeapFragment *, Eterm, Eterm);
void erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
void erts_queue_proc_message(Process* from,Process* to, ErtsProcLocks,ErtsMessage*, Eterm);
void erts_queue_proc_messages(Process* from, Process* to, ErtsProcLocks,
@@ -455,8 +463,6 @@ Sint erts_move_messages_off_heap(Process *c_p);
Sint erts_complete_off_heap_message_queue_change(Process *c_p);
Eterm erts_change_message_queue_management(Process *c_p, Eterm new_state);
-int erts_decode_dist_message(Process *, ErtsProcLocks, ErtsMessage *, int);
-
void erts_cleanup_messages(ErtsMessage *mp);
void *erts_alloc_message_ref(void);
@@ -585,22 +591,11 @@ ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment* bp)
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg)
{
ASSERT(msg->data.attached);
- if (is_value(ERL_MESSAGE_TERM(msg))) {
- ErlHeapFragment *bp;
- bp = erts_message_to_heap_frag(msg);
- return erts_used_frag_sz(bp);
- }
- else if (msg->data.dist_ext->heap_size < 0)
- return erts_msg_attached_data_size_aux(msg);
- else {
- Uint sz = msg->data.dist_ext->heap_size;
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
- ErlHeapFragment *heap_frag;
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- sz += heap_frag->used_size;
- }
- return sz;
- }
+
+ if (ERTS_SIG_IS_INTERNAL_MSG(msg))
+ return erts_used_frag_sz(erts_message_to_heap_frag(msg));
+
+ return erts_msg_attached_data_size_aux(msg);
}
#endif
diff --git a/erts/emulator/beam/erl_monitor_link.c b/erts/emulator/beam/erl_monitor_link.c
index 48d9bd4ca5..1c6b4afaa3 100644
--- a/erts/emulator/beam/erl_monitor_link.c
+++ b/erts/emulator/beam/erl_monitor_link.c
@@ -191,7 +191,8 @@ ml_cmp_keys(Eterm key1, Eterm key2)
if (n1->sysname != n2->sysname)
return n1->sysname < n2->sysname ? -1 : 1;
ASSERT(n1->creation != n2->creation);
- return n1->creation < n2->creation ? -1 : 1;
+ if (n1->creation != 0 && n2->creation != 0)
+ return n1->creation < n2->creation ? -1 : 1;
}
ndw1 = external_thing_data_words(et1);
@@ -335,7 +336,7 @@ ml_rbt_delete(ErtsMonLnkNode **root, ErtsMonLnkNode *ml)
static void
ml_rbt_foreach(ErtsMonLnkNode *root,
- void (*func)(ErtsMonLnkNode *, void *),
+ ErtsMonLnkNodeFunc func,
void *arg)
{
mon_lnk_rbt_foreach(root, func, arg);
@@ -348,7 +349,7 @@ typedef struct {
static int
ml_rbt_foreach_yielding(ErtsMonLnkNode *root,
- void (*func)(ErtsMonLnkNode *, void *),
+ ErtsMonLnkNodeFunc func,
void *arg,
void **vyspp,
Sint limit)
@@ -362,7 +363,7 @@ ml_rbt_foreach_yielding(ErtsMonLnkNode *root,
ysp = &ys;
res = mon_lnk_rbt_foreach_yielding(ysp->root, func, arg,
&ysp->rbt_ystate, limit);
- if (res == 0) {
+ if (res > 0) {
if (ysp != &ys)
erts_free(ERTS_ALC_T_ML_YIELD_STATE, ysp);
*vyspp = NULL;
@@ -383,22 +384,22 @@ ml_rbt_foreach_yielding(ErtsMonLnkNode *root,
}
typedef struct {
- void (*func)(ErtsMonLnkNode *, void *);
+ ErtsMonLnkNodeFunc func;
void *arg;
} ErtsMonLnkForeachDeleteContext;
-static void
-rbt_wrap_foreach_delete(ErtsMonLnkNode *ml, void *vctxt)
+static int
+rbt_wrap_foreach_delete(ErtsMonLnkNode *ml, void *vctxt, Sint reds)
{
ErtsMonLnkForeachDeleteContext *ctxt = vctxt;
ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
ml->flags &= ~ERTS_ML_FLG_IN_TABLE;
- ctxt->func(ml, ctxt->arg);
+ return ctxt->func(ml, ctxt->arg, reds);
}
static void
ml_rbt_foreach_delete(ErtsMonLnkNode **root,
- void (*func)(ErtsMonLnkNode *, void *),
+ ErtsMonLnkNodeFunc func,
void *arg)
{
ErtsMonLnkForeachDeleteContext ctxt;
@@ -411,7 +412,7 @@ ml_rbt_foreach_delete(ErtsMonLnkNode **root,
static int
ml_rbt_foreach_delete_yielding(ErtsMonLnkNode **root,
- void (*func)(ErtsMonLnkNode *, void *),
+ ErtsMonLnkNodeFunc func,
void *arg,
void **vyspp,
Sint limit)
@@ -433,7 +434,7 @@ ml_rbt_foreach_delete_yielding(ErtsMonLnkNode **root,
(void *) &ctxt,
&ysp->rbt_ystate,
limit);
- if (res == 0) {
+ if (res > 0) {
if (ysp != &ys)
erts_free(ERTS_ALC_T_ML_YIELD_STATE, ysp);
*vyspp = NULL;
@@ -459,12 +460,11 @@ ml_rbt_foreach_delete_yielding(ErtsMonLnkNode **root,
static int
ml_dl_list_foreach_yielding(ErtsMonLnkNode *list,
- void (*func)(ErtsMonLnkNode *, void *),
+ ErtsMonLnkNodeFunc func,
void *arg,
void **vyspp,
- Sint limit)
+ Sint reds)
{
- Sint cnt = 0;
ErtsMonLnkNode *ml = (ErtsMonLnkNode *) *vyspp;
ERTS_ML_ASSERT(!ml || list);
@@ -475,28 +475,26 @@ ml_dl_list_foreach_yielding(ErtsMonLnkNode *list,
if (ml) {
do {
ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
- func(ml, arg);
+ reds -= func(ml, arg, reds);
ml = ml->node.list.next;
- cnt++;
- } while (ml != list && cnt < limit);
+ } while (ml != list && reds > 0);
if (ml != list) {
*vyspp = (void *) ml;
- return 1; /* yield */
+ return 0; /* yield */
}
}
*vyspp = NULL;
- return 0; /* done */
+ return reds <= 0 ? 1 : reds; /* done */
}
static int
ml_dl_list_foreach_delete_yielding(ErtsMonLnkNode **list,
- void (*func)(ErtsMonLnkNode *, void *),
+ ErtsMonLnkNodeFunc func,
void *arg,
void **vyspp,
- Sint limit)
+ Sint reds)
{
- Sint cnt = 0;
ErtsMonLnkNode *first = *list;
ErtsMonLnkNode *ml = (ErtsMonLnkNode *) *vyspp;
@@ -510,19 +508,18 @@ ml_dl_list_foreach_delete_yielding(ErtsMonLnkNode **list,
ErtsMonLnkNode *next = ml->node.list.next;
ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
ml->flags &= ~ERTS_ML_FLG_IN_TABLE;
- func(ml, arg);
+ reds -= func(ml, arg, reds);
ml = next;
- cnt++;
- } while (ml != first && cnt < limit);
+ } while (ml != first && reds > 0);
if (ml != first) {
*vyspp = (void *) ml;
- return 1; /* yield */
+ return 0; /* yield */
}
}
*vyspp = NULL;
*list = NULL;
- return 0; /* done */
+ return reds <= 0 ? 1 : reds; /* done */
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
@@ -666,91 +663,91 @@ erts_monitor_tree_delete(ErtsMonitor **root, ErtsMonitor *mon)
void
erts_monitor_tree_foreach(ErtsMonitor *root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg)
{
ml_rbt_foreach((ErtsMonLnkNode *) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (ErtsMonLnkNodeFunc) func,
arg);
}
int
erts_monitor_tree_foreach_yielding(ErtsMonitor *root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
Sint limit)
{
return ml_rbt_foreach_yielding((ErtsMonLnkNode *) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (int (*)(ErtsMonLnkNode*, void*, Sint)) func,
arg, vyspp, limit);
}
void
erts_monitor_tree_foreach_delete(ErtsMonitor **root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg)
{
ml_rbt_foreach_delete((ErtsMonLnkNode **) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (int (*)(ErtsMonLnkNode*, void*, Sint)) func,
arg);
}
int
erts_monitor_tree_foreach_delete_yielding(ErtsMonitor **root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
Sint limit)
{
return ml_rbt_foreach_delete_yielding((ErtsMonLnkNode **) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (int (*)(ErtsMonLnkNode*, void*, Sint)) func,
arg, vyspp, limit);
}
void
erts_monitor_list_foreach(ErtsMonitor *list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg)
{
void *ystate = NULL;
- while (ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
- (void (*)(ErtsMonLnkNode *, void *)) func,
- arg, &ystate, (Sint) INT_MAX));
+ while (!ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
+ (int (*)(ErtsMonLnkNode *, void *, Sint)) func,
+ arg, &ystate, (Sint) INT_MAX));
}
int
erts_monitor_list_foreach_yielding(ErtsMonitor *list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
Sint limit)
{
return ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
- (void (*)(ErtsMonLnkNode *, void *)) func,
+ (int (*)(ErtsMonLnkNode *, void *, Sint)) func,
arg, vyspp, limit);
}
void
erts_monitor_list_foreach_delete(ErtsMonitor **list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg)
{
void *ystate = NULL;
- while (ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
- (void (*)(ErtsMonLnkNode*, void*)) func,
- arg, &ystate, (Sint) INT_MAX));
+ while (!ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
+ (int (*)(ErtsMonLnkNode*, void*, Sint)) func,
+ arg, &ystate, (Sint) INT_MAX));
}
int
erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
Sint limit)
{
return ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (int (*)(ErtsMonLnkNode*, void*, Sint)) func,
arg, vyspp, limit);
}
@@ -1074,92 +1071,92 @@ erts_link_tree_delete(ErtsLink **root, ErtsLink *lnk)
void
erts_link_tree_foreach(ErtsLink *root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg)
{
ml_rbt_foreach((ErtsMonLnkNode *) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (ErtsMonLnkNodeFunc) func,
arg);
}
int
erts_link_tree_foreach_yielding(ErtsLink *root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg,
void **vyspp,
Sint limit)
{
return ml_rbt_foreach_yielding((ErtsMonLnkNode *) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (ErtsMonLnkNodeFunc) func,
arg, vyspp, limit);
}
void
erts_link_tree_foreach_delete(ErtsLink **root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg)
{
ml_rbt_foreach_delete((ErtsMonLnkNode **) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (ErtsMonLnkNodeFunc) func,
arg);
}
int
erts_link_tree_foreach_delete_yielding(ErtsLink **root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg,
void **vyspp,
Sint limit)
{
return ml_rbt_foreach_delete_yielding((ErtsMonLnkNode **) root,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (ErtsMonLnkNodeFunc) func,
arg, vyspp, limit);
}
void
erts_link_list_foreach(ErtsLink *list,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg)
{
void *ystate = NULL;
- while (ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
- (void (*)(ErtsMonLnkNode *, void *)) func,
- arg, &ystate, (Sint) INT_MAX));
+ while (!ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
+ (ErtsMonLnkNodeFunc) func,
+ arg, &ystate, (Sint) INT_MAX));
}
int
erts_link_list_foreach_yielding(ErtsLink *list,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg,
void **vyspp,
Sint limit)
{
return ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
- (void (*)(ErtsMonLnkNode *, void *)) func,
+ (ErtsMonLnkNodeFunc) func,
arg, vyspp, limit);
}
void
erts_link_list_foreach_delete(ErtsLink **list,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg)
{
void *ystate = NULL;
- while (ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
- (void (*)(ErtsMonLnkNode*, void*)) func,
- arg, &ystate, (Sint) INT_MAX));
+ while (!ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
+ (ErtsMonLnkNodeFunc) func,
+ arg, &ystate, (Sint) INT_MAX));
}
int
erts_link_list_foreach_delete_yielding(ErtsLink **list,
- void (*func)(ErtsLink *, void *),
+ int (*func)(ErtsLink *, void *, Sint),
void *arg,
void **vyspp,
Sint limit)
{
return ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
- (void (*)(ErtsMonLnkNode*, void*)) func,
+ (ErtsMonLnkNodeFunc) func,
arg, vyspp, limit);
}
diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h
index ed7bf7d54a..eff861fce8 100644
--- a/erts/emulator/beam/erl_monitor_link.h
+++ b/erts/emulator/beam/erl_monitor_link.h
@@ -439,6 +439,7 @@
(ERTS_ML_FLG_EXTENDED|ERTS_ML_FLG_NAME)
typedef struct ErtsMonLnkNode__ ErtsMonLnkNode;
+typedef int (*ErtsMonLnkNodeFunc)(ErtsMonLnkNode *, void *, Sint);
typedef struct {
UWord parent; /* Parent ptr and flags... */
@@ -622,6 +623,7 @@ erts_ml_dl_list_last__(ErtsMonLnkNode *list)
typedef struct ErtsMonLnkNode__ ErtsMonitor;
+typedef int (*ErtsMonitorFunc)(ErtsMonitor *, void *, Sint);
typedef struct {
ErtsMonitor origin;
@@ -653,6 +655,7 @@ struct ErtsMonitorDataExtended__ {
typedef struct ErtsMonitorSuspend__ ErtsMonitorSuspend;
+
struct ErtsMonitorSuspend__ {
ErtsMonitorData md; /* origin = suspender; target = suspendee */
ErtsMonitorSuspend *next;
@@ -685,7 +688,7 @@ ErtsMonitor *erts_monitor_tree_lookup(ErtsMonitor *root, Eterm key);
*
* @brief Lookup or insert a monitor in a monitor tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' monitor is not part of any tree or list
* If the above is not true, bad things will happen.
*
@@ -711,7 +714,7 @@ ErtsMonitor *erts_monotor_tree_lookup_insert(ErtsMonitor **root,
* If it is not found, creates a monitor and returns a pointer to the
* origin monitor.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - no target monitors with the key 'target' exists in the tree.
* If the above is not true, bad things will happen.
*
@@ -738,7 +741,7 @@ ErtsMonitor *erts_monitor_tree_lookup_create(ErtsMonitor **root, int *created,
*
* @brief Insert a monitor in a monitor tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - no monitors with the same key that 'mon' exist in the tree
* - 'mon' is not part of any list or tree
* If the above are not true, bad things will happen.
@@ -754,7 +757,7 @@ void erts_monitor_tree_insert(ErtsMonitor **root, ErtsMonitor *mon);
*
* @brief Replace a monitor in a monitor tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'old' monitor and 'new' monitor have exactly the same key
* - 'old' monitor is part of the tree
* - 'new' monitor is not part of any tree or list
@@ -774,7 +777,7 @@ void erts_monitor_tree_replace(ErtsMonitor **root, ErtsMonitor *old,
*
* @brief Delete a monitor from a monitor tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' monitor is part of the tree
* If the above is not true, bad things will happen.
*
@@ -789,7 +792,7 @@ void erts_monitor_tree_delete(ErtsMonitor **root, ErtsMonitor *mon);
*
* @brief Call a function for each monitor in a monitor tree
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
* in the tree referred to by 'root'.
*
@@ -802,7 +805,7 @@ void erts_monitor_tree_delete(ErtsMonitor **root, ErtsMonitor *mon);
*
*/
void erts_monitor_tree_foreach(ErtsMonitor *root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg);
/**
@@ -810,9 +813,10 @@ void erts_monitor_tree_foreach(ErtsMonitor *root,
* @brief Call a function for each monitor in a monitor tree. Yield
* if lots of monitors exist.
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
- * in the tree referred to by 'root'.
+ * in the tree referred to by 'root'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -835,27 +839,28 @@ void erts_monitor_tree_foreach(ErtsMonitor *root,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of monitors to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all monitors has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all monitors
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_monitor_tree_foreach_yielding(ErtsMonitor *root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
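A minimal sketch of driving this yielding API under the new convention (the callback my_mon_cb and the budget of 100 reductions are illustrative, not part of the patch): the callback returns the reductions it consumed, and the iterator returns 0 when it yields or the unconsumed reductions when the whole tree has been visited.

    static int my_mon_cb(ErtsMonitor *mon, void *arg, Sint reds)
    {
        /* inspect 'mon'; 'reds' is the remaining budget */
        return 1;  /* reductions consumed by this call */
    }

    static void visit_all(ErtsMonitor *root)
    {
        void *yield_state = NULL;  /* must be NULL on the first call */
        while (!erts_monitor_tree_foreach_yielding(root, my_mon_cb, NULL,
                                                   &yield_state, 100)) {
            /* 0 => budget exhausted; normally reschedule before continuing */
        }
        /* non-zero return: done; the value is the unconsumed reductions */
    }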
/**
*
* @brief Delete all monitors from a monitor tree and call a function for
* each monitor
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
- * in the tree referred to by 'root'.
+ * in the tree referred to by 'root'. It should return the number of
+ * reductions the operation took to perform.
*
* @param[in,out] root Pointer to pointer to root of monitor tree
*
@@ -866,7 +871,7 @@ int erts_monitor_tree_foreach_yielding(ErtsMonitor *root,
*
*/
void erts_monitor_tree_foreach_delete(ErtsMonitor **root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg);
/**
@@ -874,9 +879,10 @@ void erts_monitor_tree_foreach_delete(ErtsMonitor **root,
* @brief Delete all monitors from a monitor tree and call a function for
* each monitor
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
- * in the tree referred to by 'root'.
+ * in the tree referred to by 'root'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -899,18 +905,18 @@ void erts_monitor_tree_foreach_delete(ErtsMonitor **root,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of monitors to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all monitors has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all monitors
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_monitor_tree_foreach_delete_yielding(ErtsMonitor **root,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
/*
* --- Monitor list operations --
@@ -920,7 +926,7 @@ int erts_monitor_tree_foreach_delete_yielding(ErtsMonitor **root,
*
* @brief Insert a monitor in a monitor list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' monitor is not part of any list or tree
* If the above is not true, bad things will happen.
*
@@ -935,7 +941,7 @@ ERTS_GLB_INLINE void erts_monitor_list_insert(ErtsMonitor **list, ErtsMonitor *m
*
* @brief Delete a monitor from a monitor list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' monitor is part of the list
* If the above is not true, bad things will happen.
*
@@ -980,7 +986,7 @@ ERTS_GLB_INLINE ErtsMonitor *erts_monitor_list_last(ErtsMonitor *list);
*
* @brief Call a function for each monitor in a monitor list
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
* in the list referred to by 'list'.
*
@@ -993,7 +999,7 @@ ERTS_GLB_INLINE ErtsMonitor *erts_monitor_list_last(ErtsMonitor *list);
*
*/
void erts_monitor_list_foreach(ErtsMonitor *list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg);
/**
@@ -1001,9 +1007,10 @@ void erts_monitor_list_foreach(ErtsMonitor *list,
* @brief Call a function for each monitor in a monitor list. Yield
* if lots of monitors exist.
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
- * in the tree referred to by 'root'.
+ * in the list referred to by 'list'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -1026,25 +1033,25 @@ void erts_monitor_list_foreach(ErtsMonitor *list,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of monitors to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all monitors has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all monitors
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_monitor_list_foreach_yielding(ErtsMonitor *list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
/**
*
* @brief Delete all monitors from a monitor list and call a function for
* each monitor
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
* in the list referred to by 'list'.
*
@@ -1057,7 +1064,7 @@ int erts_monitor_list_foreach_yielding(ErtsMonitor *list,
*
*/
void erts_monitor_list_foreach_delete(ErtsMonitor **list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg);
/**
@@ -1065,9 +1072,10 @@ void erts_monitor_list_foreach_delete(ErtsMonitor **list,
* @brief Delete all monitors from a monitor list and call a function for
* each monitor
*
- * The funcion 'func' will be called with a pointer to a monitor
+ * The function 'func' will be called with a pointer to a monitor
* as first argument and 'arg' as second argument for each monitor
- * in the tree referred to by 'root'.
+ * in the list referred to by 'list'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -1090,18 +1098,18 @@ void erts_monitor_list_foreach_delete(ErtsMonitor **list,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of monitors to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all monitors has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all monitors
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
- void (*func)(ErtsMonitor *, void *),
+ ErtsMonitorFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
/*
* --- Misc monitor operations ---
@@ -1113,7 +1121,7 @@ int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
*
* Can create all types of monitors
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'ref' is an internal ordinary reference if type is ERTS_MON_TYPE_PROC,
* ERTS_MON_TYPE_PORT, ERTS_MON_TYPE_TIME_OFFSET, or ERTS_MON_TYPE_RESOURCE
* - 'ref' is NIL if type is ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or
@@ -1199,7 +1207,7 @@ ERTS_GLB_INLINE int erts_monitor_is_in_table(ErtsMonitor *mon);
* When both the origin and the target part of the monitor have
* been released the monitor structure will be deallocated.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' monitor is not part of any list or tree
* - 'mon' is not referred to by any other structures
* If the above are not true, bad things will happen.
@@ -1216,7 +1224,7 @@ ERTS_GLB_INLINE void erts_monitor_release(ErtsMonitor *mon);
* Release both the origin and target parts of the monitor
* simultaneously and deallocate the structure.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - Neither the origin part nor the target part of the monitor
* is part of any list or tree
* - Neither the origin part nor the target part of the monitor
@@ -1232,7 +1240,7 @@ ERTS_GLB_INLINE void erts_monitor_release_both(ErtsMonitorData *mdp);
*
* @brief Insert monitor in dist monitor tree or list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' monitor is not part of any list or tree
* If the above is not true, bad things will happen.
*
@@ -1253,7 +1261,7 @@ ERTS_GLB_INLINE int erts_monitor_dist_insert(ErtsMonitor *mon, ErtsMonLnkDist *d
*
* @brief Delete monitor from dist monitor tree or list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' monitor has previously been inserted into 'dist'
* If the above is not true, bad things will happen.
*
@@ -1291,7 +1299,7 @@ erts_monitor_set_dead_dist(ErtsMonitor *mon, Eterm nodename);
* whole size of the monitor data structure is returned; otherwise,
* half of the size is returned.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'mon' has not been released
* If the above is not true, bad things will happen.
*
@@ -1507,6 +1515,8 @@ ERTS_GLB_INLINE ErtsMonitorSuspend *erts_monitor_suspend(ErtsMonitor *mon)
typedef struct ErtsMonLnkNode__ ErtsLink;
+typedef int (*ErtsLinkFunc)(ErtsLink *, void *, Sint);
+
typedef struct {
ErtsLink a;
ErtsLink b;
@@ -1544,7 +1554,7 @@ ErtsLink *erts_link_tree_lookup(ErtsLink *root, Eterm item);
*
* @brief Lookup or insert a link in a link tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link is not part of any tree or list
* If the above is not true, bad things will happen.
*
@@ -1590,7 +1600,7 @@ ErtsLink *erts_link_tree_lookup_create(ErtsLink **root, int *created,
*
* @brief Insert a link in a link tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - no links with the same key that 'lnk' exist in the tree
* - 'lnk' is not part of any list or tree
* If the above are not true, bad things will happen.
@@ -1606,7 +1616,7 @@ void erts_link_tree_insert(ErtsLink **root, ErtsLink *lnk);
*
* @brief Replace a link in a link tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'old' link and 'new' link have exactly the same key
* - 'old' link is part of the tree
* - 'new' link is not part of any tree or list
@@ -1630,7 +1640,7 @@ void erts_link_tree_replace(ErtsLink **root, ErtsLink *old, ErtsLink *new);
* the tree and 'lnk' has a lower address than the link in the
* tree, the existing link in the tree is replaced by 'lnk'.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link is not part of any tree or list
* If the above are not true, bad things will happen.
*
@@ -1649,7 +1659,7 @@ ERTS_GLB_INLINE ErtsLink *erts_link_tree_insert_addr_replace(ErtsLink **root,
*
* @brief Delete a link from a link tree
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link is part of the tree
* If the above is not true, bad things will happen.
*
@@ -1668,7 +1678,7 @@ void erts_link_tree_delete(ErtsLink **root, ErtsLink *lnk);
* If link 'lnk' is not in the tree, another link with the same
* key as 'lnk' is deleted from the tree if such a link exists.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - if 'lnk' link is part of a tree or list, it is part of this tree
* If the above is not true, bad things will happen.
*
@@ -1687,7 +1697,7 @@ ERTS_GLB_INLINE ErtsLink *erts_link_tree_key_delete(ErtsLink **root, ErtsLink *l
*
* @brief Call a function for each link in a link tree
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
* in the tree referred to by 'root'.
*
@@ -1700,7 +1710,7 @@ ERTS_GLB_INLINE ErtsLink *erts_link_tree_key_delete(ErtsLink **root, ErtsLink *l
*
*/
void erts_link_tree_foreach(ErtsLink *root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc,
void *arg);
/**
@@ -1708,9 +1718,10 @@ void erts_link_tree_foreach(ErtsLink *root,
* @brief Call a function for each link in a link tree. Yield if lots
* of links exist.
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
- * in the tree referred to by 'root'.
+ * in the tree referred to by 'root'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -1733,25 +1744,25 @@ void erts_link_tree_foreach(ErtsLink *root,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of links to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all links has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all links
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_link_tree_foreach_yielding(ErtsLink *root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
/**
*
* @brief Delete all links from a link tree and call a function for
* each link
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
* in the tree referred to by 'root'.
*
@@ -1764,7 +1775,7 @@ int erts_link_tree_foreach_yielding(ErtsLink *root,
*
*/
void erts_link_tree_foreach_delete(ErtsLink **root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg);
/**
@@ -1772,9 +1783,10 @@ void erts_link_tree_foreach_delete(ErtsLink **root,
* @brief Delete all links from a link tree and call a function for
* each link
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
- * in the tree referred to by 'root'.
+ * in the tree referred to by 'root'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -1797,18 +1809,18 @@ void erts_link_tree_foreach_delete(ErtsLink **root,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of links to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all links has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all links
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_link_tree_foreach_delete_yielding(ErtsLink **root,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
/*
* --- Link list operations ---
@@ -1818,7 +1830,7 @@ int erts_link_tree_foreach_delete_yielding(ErtsLink **root,
*
* @brief Insert a link in a link list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link is not part of any list or tree
* If the above is not true, bad things will happen.
*
@@ -1833,7 +1845,7 @@ ERTS_GLB_INLINE void erts_link_list_insert(ErtsLink **list, ErtsLink *lnk);
*
* @brief Delete a link from a link list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link is part of the list
* If the above is not true, bad things will happen.
*
@@ -1878,7 +1890,7 @@ ERTS_GLB_INLINE ErtsLink *erts_link_list_last(ErtsLink *list);
*
* @brief Call a function for each link in a link list
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
* in the list referred to by 'list'.
*
@@ -1891,7 +1903,7 @@ ERTS_GLB_INLINE ErtsLink *erts_link_list_last(ErtsLink *list);
*
*/
void erts_link_list_foreach(ErtsLink *list,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg);
/**
@@ -1899,9 +1911,10 @@ void erts_link_list_foreach(ErtsLink *list,
* @brief Call a function for each link in a link list. Yield
* if lots of links exist.
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
- * in the tree referred to by 'root'.
+ * in the list referred to by 'list'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -1924,25 +1937,25 @@ void erts_link_list_foreach(ErtsLink *list,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of links to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all links has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all links
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_link_list_foreach_yielding(ErtsLink *list,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
/**
*
* @brief Delete all links from a link list and call a function for
* each link
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
* in the list referred to by 'list'.
*
@@ -1955,7 +1968,7 @@ int erts_link_list_foreach_yielding(ErtsLink *list,
*
*/
void erts_link_list_foreach_delete(ErtsLink **list,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg);
/**
@@ -1963,9 +1976,10 @@ void erts_link_list_foreach_delete(ErtsLink **list,
* @brief Delete all links from a link list and call a function for
* each link
*
- * The funcion 'func' will be called with a pointer to a link
+ * The function 'func' will be called with a pointer to a link
* as first argument and 'arg' as second argument for each link
- * in the tree referred to by 'root'.
+ * in the list referred to by 'list'. It should return the number of
+ * reductions the operation took to perform.
*
* It is assumed that:
* - *yspp equals NULL on first call
@@ -1988,18 +2002,18 @@ void erts_link_list_foreach_delete(ErtsLink **list,
* *yspp should be NULL. When done *yspp
* will be NULL.
*
- * @param[in] limit Maximum amount of links to process
- * before yielding.
+ * @param[in] reds Reductions available to execute before yielding.
*
- * @returns A non-zero value when all links has been
- * processed, and zero when more work is needed.
+ * @returns The unconsumed reductions when all links
+ * have been processed, and zero when more work
+ * is needed.
*
*/
int erts_link_list_foreach_delete_yielding(ErtsLink **list,
- void (*func)(ErtsLink *, void *),
+ ErtsLinkFunc func,
void *arg,
void **vyspp,
- Sint limit);
+ Sint reds);
/*
* --- Misc link operations ---
@@ -2011,7 +2025,7 @@ int erts_link_list_foreach_delete_yielding(ErtsLink **list,
*
* Can create all types of links
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'ref' is an internal ordinary reference if type is ERTS_MON_TYPE_PROC,
* ERTS_MON_TYPE_PORT, ERTS_MON_TYPE_TIME_OFFSET, or ERTS_MON_TYPE_RESOURCE
* - 'ref' is NIL if type is ERTS_MON_TYPE_NODE or ERTS_MON_TYPE_NODES
@@ -2081,7 +2095,7 @@ ERTS_GLB_INLINE int erts_link_is_in_table(ErtsLink *lnk);
* When both halves of the link have been released, the link
* structure will be deallocated.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link is not part of any list or tree
* - 'lnk' is not referred to by any other structures
* If the above are not true, bad things will happen.
@@ -2098,7 +2112,7 @@ ERTS_GLB_INLINE void erts_link_release(ErtsLink *lnk);
* Release both halves of a link simultaneously and deallocate
* the structure.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - Neither of the parts of the link are part of any list or tree
* - Neither of the parts of the link or the link data structure
* are referred to by any other structures
@@ -2113,7 +2127,7 @@ ERTS_GLB_INLINE void erts_link_release_both(ErtsLinkData *ldp);
*
* @brief Insert link in dist link list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link is not part of any list or tree
* If the above is not true, bad things will happen.
*
@@ -2134,7 +2148,7 @@ ERTS_GLB_INLINE int erts_link_dist_insert(ErtsLink *lnk, ErtsMonLnkDist *dist);
*
* @brief Delete link from dist link list
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' link has previously been inserted into 'dist'
* If the above is not true, bad things will happen.
*
@@ -2172,7 +2186,7 @@ erts_link_set_dead_dist(ErtsLink *lnk, Eterm nodename);
* whole size of the link data structure is returned; otherwise,
* half of the size is returned.
*
- * When the funcion is called it is assumed that:
+ * When the function is called it is assumed that:
* - 'lnk' has not been released
* If the above is not true, bad things will happen.
*
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ebef485b04..1fbe362330 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -707,6 +707,46 @@ error:
return reds;
}
+/** @brief Create a message with the content of the process-independent \c msg_env.
+ * Invalidates \c msg_env.
+ */
+ErtsMessage* erts_create_message_from_nif_env(ErlNifEnv* msg_env)
+{
+ struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
+ ErtsMessage* mp;
+
+ flush_env(msg_env);
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = menv->env.heap_frag;
+ ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
+ if (mp->data.heap_frag != NULL) {
+ /* Move all off-heap data from the phony proc to the first fragment.
+ Quick and dirty... */
+ ASSERT(!is_offheap(&mp->data.heap_frag->off_heap));
+ mp->data.heap_frag->off_heap = MSO(&menv->phony_proc);
+ clear_offheap(&MSO(&menv->phony_proc));
+ menv->env.heap_frag = NULL;
+ MBUF(&menv->phony_proc) = NULL;
+ }
+ return mp;
+}
+
+static ERTS_INLINE ERL_NIF_TERM make_copy(ErlNifEnv* dst_env,
+ ERL_NIF_TERM src_term,
+ Uint *cpy_szp)
+{
+ Uint sz;
+ Eterm* hp;
+ /*
+ * No preserved sharing allowed as long as literals are also preserved.
+ * A process independent environment cannot be reached by purge.
+ */
+ sz = size_object(src_term);
+ if (cpy_szp)
+ *cpy_szp += sz;
+ hp = alloc_heap(dst_env, sz);
+ return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
+}
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
@@ -720,6 +760,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
Eterm from;
Eterm receiver = to_pid->pid;
int scheduler;
+ Uint copy_sz = 0;
execution_state(env, &c_p, &scheduler);
@@ -783,14 +824,14 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
stoken = NIL;
}
#endif
- token = enif_make_copy(msg_env, stoken);
+ token = make_copy(msg_env, stoken, &copy_sz);
#ifdef USE_VM_PROBES
if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING) {
if (is_immed(DT_UTAG(c_p)))
utag = DT_UTAG(c_p);
else
- utag = enif_make_copy(msg_env, DT_UTAG(c_p));
+ utag = make_copy(msg_env, DT_UTAG(c_p), &copy_sz);
}
if (DTRACE_ENABLED(message_send)) {
if (have_seqtrace(stoken)) {
@@ -803,20 +844,8 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
}
#endif
}
- flush_env(msg_env);
- mp = erts_alloc_message(0, NULL);
+ mp = erts_create_message_from_nif_env(msg_env);
ERL_MESSAGE_TOKEN(mp) = token;
- mp->data.heap_frag = menv->env.heap_frag;
- ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
- if (mp->data.heap_frag != NULL) {
- /* Move all offheap's from phony proc to the first fragment.
- Quick and dirty... */
- ASSERT(!is_offheap(&mp->data.heap_frag->off_heap));
- mp->data.heap_frag->off_heap = MSO(&menv->phony_proc);
- clear_offheap(&MSO(&menv->phony_proc));
- menv->env.heap_frag = NULL;
- MBUF(&menv->phony_proc) = NULL;
- }
} else {
erts_literal_area_t litarea;
ErlOffHeap *ohp;
@@ -824,6 +853,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
Uint sz;
INITIALIZE_LITERAL_PURGE_AREA(litarea);
sz = size_object_litopt(msg, &litarea);
+ copy_sz += sz;
if (c_p && !env->tracee) {
full_flush_env(env);
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
@@ -856,6 +886,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
trace_send(c_p, receiver, msg);
full_cache_env(env);
}
+ if (c_p && scheduler > 0 && copy_sz > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
+ Uint reds = copy_sz / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
+ if (reds > CONTEXT_REDS)
+ reds = CONTEXT_REDS;
+ BUMP_REDS(c_p, (int) reds);
+ }
}
else {
/* This clause is taken when the nif is called in the context
@@ -924,6 +960,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
erts_queue_message(rp, rp_locks, mp, msg, from);
done:
+
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks & ~lc_locks)
@@ -987,7 +1024,7 @@ static Eterm call_whereis(ErlNifEnv *env, Eterm name)
int scheduler;
execution_state(env, &c_p, &scheduler);
- ASSERT((c_p && scheduler) || (!c_p && !scheduler));
+ ASSERT(scheduler || !c_p);
if (scheduler < 0) {
/* dirty scheduler */
@@ -1036,18 +1073,9 @@ int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port)
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
- Uint sz;
- Eterm* hp;
- /*
- * No preserved sharing allowed as long as literals are also preserved.
- * Process independent environment can not be reached by purge.
- */
- sz = size_object(src_term);
- hp = alloc_heap(dst_env, sz);
- return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
+ return make_copy(dst_env, src_term, NULL);
}
-
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* oh)
{
@@ -1072,6 +1100,17 @@ int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
return 0;
}
+void enif_set_pid_undefined(ErlNifPid* pid)
+{
+ pid->pid = am_undefined;
+}
+
+int enif_is_pid_undefined(const ErlNifPid* pid)
+{
+ ASSERT(pid->pid == am_undefined || is_internal_pid(pid->pid));
+ return pid->pid == am_undefined;
+}
+
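A hedged usage sketch of the two new pid helpers (the resource pointer obj and the surrounding NIF are illustrative): a pid can be parked as undefined and monitoring skipped until a real pid has been set, mirroring the early return added to enif_monitor_process further down.

    ErlNifPid owner;
    ErlNifMonitor mon;
    enif_set_pid_undefined(&owner);  /* no owner yet */
    /* ... later, perhaps filled in via enif_self(env, &owner) ... */
    if (!enif_is_pid_undefined(&owner))
        enif_monitor_process(env, obj, &owner, &mon);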
int enif_get_local_port(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPort* port)
{
if (is_internal_port(term)) {
@@ -1136,6 +1175,47 @@ int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
return is_number(term);
}
+ErlNifTermType enif_term_type(ErlNifEnv* env, ERL_NIF_TERM term) {
+ (void)env;
+
+ switch (tag_val_def(term)) {
+ case ATOM_DEF:
+ return ERL_NIF_TERM_TYPE_ATOM;
+ case BINARY_DEF:
+ return ERL_NIF_TERM_TYPE_BITSTRING;
+ case FLOAT_DEF:
+ return ERL_NIF_TERM_TYPE_FLOAT;
+ case EXPORT_DEF:
+ case FUN_DEF:
+ return ERL_NIF_TERM_TYPE_FUN;
+ case BIG_DEF:
+ case SMALL_DEF:
+ return ERL_NIF_TERM_TYPE_INTEGER;
+ case LIST_DEF:
+ case NIL_DEF:
+ return ERL_NIF_TERM_TYPE_LIST;
+ case MAP_DEF:
+ return ERL_NIF_TERM_TYPE_MAP;
+ case EXTERNAL_PID_DEF:
+ case PID_DEF:
+ return ERL_NIF_TERM_TYPE_PID;
+ case EXTERNAL_PORT_DEF:
+ case PORT_DEF:
+ return ERL_NIF_TERM_TYPE_PORT;
+ case EXTERNAL_REF_DEF:
+ case REF_DEF:
+ return ERL_NIF_TERM_TYPE_REFERENCE;
+ case TUPLE_DEF:
+ return ERL_NIF_TERM_TYPE_TUPLE;
+ default:
+ /* tag_val_def() aborts on its own when passed complete garbage, but
+ * it's possible that the user has given us garbage that just happens
+ * to match something that tag_val_def() accepts but we don't, like
+ * binary match contexts. */
+ ERTS_INTERNAL_ERROR("Invalid term passed to enif_term_type");
+ }
+}
+
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
@@ -1264,11 +1344,18 @@ unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size,
int enif_term_to_binary(ErlNifEnv *dst_env, ERL_NIF_TERM term,
ErlNifBinary *bin)
{
- Sint size;
+ Uint size;
byte *bp;
Binary* refbin;
- size = erts_encode_ext_size(term);
+ switch (erts_encode_ext_size(term, &size)) {
+ case ERTS_EXT_SZ_SYSTEM_LIMIT:
+ return 0; /* system limit */
+ case ERTS_EXT_SZ_YIELD:
+ ERTS_INTERNAL_ERROR("Unexpected yield");
+ case ERTS_EXT_SZ_OK:
+ break;
+ }
if (!enif_alloc_binary(size, bin))
return 0;
@@ -2346,12 +2433,13 @@ rmon_refc_read(ErtsResourceMonitors *rms)
return rms->refc & ERTS_RESOURCE_REFC_MASK;
}
-static void dtor_demonitor(ErtsMonitor* mon, void* context)
+static int dtor_demonitor(ErtsMonitor* mon, void* context, Sint reds)
{
ASSERT(erts_monitor_is_origin(mon));
ASSERT(is_internal_pid(mon->other.item));
erts_proc_sig_send_demonitor(mon);
+ return 1;
}
#ifdef DEBUG
@@ -2361,10 +2449,26 @@ int erts_dbg_is_resource_dying(ErtsResource* resource)
}
#endif
-# define NIF_RESOURCE_DTOR &nif_resource_dtor
+#define NIF_RESOURCE_DTOR &nif_resource_dtor_prologue
-static int nif_resource_dtor(Binary* bin)
+static void run_resource_dtor(void* vbin);
+
+static int nif_resource_dtor_prologue(Binary* bin)
{
+ /*
+ * Schedule the user resource destructor as aux work to get a context
+ * where we know, for example, which locks are held.
+ */
+ Uint sched_id = erts_get_scheduler_id();
+ if (!sched_id)
+ sched_id = 1;
+ erts_schedule_misc_aux_work(sched_id, run_resource_dtor, bin);
+ return 0; /* don't free */
+}
+
+static void run_resource_dtor(void* vbin)
+{
+ Binary* bin = (Binary*) vbin;
ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ErlNifResourceType* type = resource->type;
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
@@ -2396,11 +2500,11 @@ static int nif_resource_dtor(Binary* bin)
* If resource->monitors->refc != 0 there are
* outstanding references to the resource from
* monitors that have not been removed yet.
- * nif_resource_dtor() will be called again this
+ * nif_resource_dtor_prologue() will be called again when this
* reference count reaches zero.
*/
if (refc != 0)
- return 0; /* we'll be back... */
+ return; /* we'll be back... */
erts_mtx_destroy(&rm->lock);
}
@@ -2417,7 +2521,7 @@ static int nif_resource_dtor(Binary* bin)
steal_resource_type(type);
erts_free(ERTS_ALC_T_NIF, type);
}
- return 1;
+ erts_magic_binary_free((Binary*)vbin);
}
void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
@@ -3329,6 +3433,9 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
}
ASSERT(rsrc->type->down);
+ if (target_pid->pid == am_undefined)
+ return 1;
+
ref = erts_make_ref_in_buffer(tmp);
mdp = erts_monitor_create(ERTS_MON_TYPE_RESOURCE, ref,
@@ -3362,6 +3469,12 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
return 0;
}
+ERL_NIF_TERM enif_make_monitor_term(ErlNifEnv* env, const ErlNifMonitor* monitor)
+{
+ Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
+ return erts_driver_monitor_to_ref(hp, monitor);
+}
+
int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
{
ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 4c09496ef1..c250e8f683 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -54,10 +54,22 @@
** 2.13: 20.1 add enif_ioq
** 2.14: 21.0 add enif_ioq_peek_head, enif_(mutex|cond|rwlock|thread)_name
** enif_vfprintf, enif_vsnprintf, enif_make_map_from_arrays
+** 2.15: 22.0 ERL_NIF_SELECT_CANCEL, enif_select_(read|write)
+** enif_term_type
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 14
-#define ERL_NIF_MIN_ERTS_VERSION "erts-10.0 (OTP-21)"
+#define ERL_NIF_MINOR_VERSION 15
+
+/*
+ * WHEN CHANGING INTERFACE VERSION, also replace erts version below with
+ * a ticket number e.g. "erts-@OTP-12345@". The syntax is the same as for
+ * runtime dependencies so multiple tickets should be separated with ":", e.g.
+ * "erts-@OTP-12345:OTP-54321@".
+ *
+ * If you're not on the OTP team, you should use a placeholder like
+ * erts-@MyName@ instead.
+ */
+#define ERL_NIF_MIN_ERTS_VERSION "erts-10.4"
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -160,6 +172,8 @@ typedef int ErlNifEvent;
#define ERL_NIF_SELECT_STOP_SCHEDULED (1 << 1)
#define ERL_NIF_SELECT_INVALID_EVENT (1 << 2)
#define ERL_NIF_SELECT_FAILED (1 << 3)
+#define ERL_NIF_SELECT_READ_CANCELLED (1 << 4)
+#define ERL_NIF_SELECT_WRITE_CANCELLED (1 << 5)
typedef enum
{
@@ -274,6 +288,26 @@ typedef enum {
ERL_NIF_IOQ_NORMAL = 1
} ErlNifIOQueueOpts;
+typedef enum {
+ ERL_NIF_TERM_TYPE_ATOM = 1,
+ ERL_NIF_TERM_TYPE_BITSTRING = 2,
+ ERL_NIF_TERM_TYPE_FLOAT = 3,
+ ERL_NIF_TERM_TYPE_FUN = 4,
+ ERL_NIF_TERM_TYPE_INTEGER = 5,
+ ERL_NIF_TERM_TYPE_LIST = 6,
+ ERL_NIF_TERM_TYPE_MAP = 7,
+ ERL_NIF_TERM_TYPE_PID = 8,
+ ERL_NIF_TERM_TYPE_PORT = 9,
+ ERL_NIF_TERM_TYPE_REFERENCE = 10,
+ ERL_NIF_TERM_TYPE_TUPLE = 11,
+
+ /* This is a dummy value intended to coax the compiler into warning about
+ * unhandled values in a switch even if all the above values have been
+ * handled. We can add new entries at any time so the user must always
+ * have a default case. */
+ ERL_NIF_TERM_TYPE__MISSING_DEFAULT_CASE__READ_THE_MANUAL = -1
+} ErlNifTermType;
+
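As the trailing dummy value warns, a switch over ErlNifTermType must always keep a default case, since new types may be added. A small sketch of the intended use (describe_term is illustrative):

    static const char *describe_term(ErlNifEnv *env, ERL_NIF_TERM t)
    {
        switch (enif_term_type(env, t)) {
        case ERL_NIF_TERM_TYPE_ATOM:    return "atom";
        case ERL_NIF_TERM_TYPE_INTEGER: return "integer";
        case ERL_NIF_TERM_TYPE_LIST:    return "list";
        /* ... remaining cases ... */
        default:                        return "other";  /* future types */
        }
    }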
/*
* Return values from enif_thread_type(). Negative values
* reserved for specific types of non-scheduler threads.
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 81f64f2390..d57f6ec97c 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -210,6 +210,13 @@ ERL_NIF_API_FUNC_DECL(int,enif_vsnprintf,(char*, size_t, const char *fmt, va_lis
ERL_NIF_API_FUNC_DECL(int,enif_make_map_from_arrays,(ErlNifEnv *env, ERL_NIF_TERM keys[], ERL_NIF_TERM values[], size_t cnt, ERL_NIF_TERM *map_out));
+ERL_NIF_API_FUNC_DECL(int,enif_select_x,(ErlNifEnv* env, ErlNifEvent e, enum ErlNifSelectFlags flags, void* obj, const ErlNifPid* pid, ERL_NIF_TERM msg, ErlNifEnv* msg_env));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_monitor_term,(ErlNifEnv* env, const ErlNifMonitor*));
+ERL_NIF_API_FUNC_DECL(void,enif_set_pid_undefined,(ErlNifPid* pid));
+ERL_NIF_API_FUNC_DECL(int,enif_is_pid_undefined,(const ErlNifPid* pid));
+
+ERL_NIF_API_FUNC_DECL(ErlNifTermType,enif_term_type,(ErlNifEnv* env, ERL_NIF_TERM term));
+
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
*/
@@ -392,6 +399,11 @@ ERL_NIF_API_FUNC_DECL(int,enif_make_map_from_arrays,(ErlNifEnv *env, ERL_NIF_TER
# define enif_vfprintf ERL_NIF_API_FUNC_MACRO(enif_vfprintf)
# define enif_vsnprintf ERL_NIF_API_FUNC_MACRO(enif_vsnprintf)
# define enif_make_map_from_arrays ERL_NIF_API_FUNC_MACRO(enif_make_map_from_arrays)
+# define enif_select_x ERL_NIF_API_FUNC_MACRO(enif_select_x)
+# define enif_make_monitor_term ERL_NIF_API_FUNC_MACRO(enif_make_monitor_term)
+# define enif_set_pid_undefined ERL_NIF_API_FUNC_MACRO(enif_set_pid_undefined)
+# define enif_is_pid_undefined ERL_NIF_API_FUNC_MACRO(enif_is_pid_undefined)
+# define enif_term_type ERL_NIF_API_FUNC_MACRO(enif_term_type)
/*
** ADD NEW ENTRIES HERE (before this comment)
@@ -623,6 +635,13 @@ static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list9(ErlNifEnv* env,
#ifndef enif_make_pid
# define enif_make_pid(ENV, PID) ((void)(ENV),(const ERL_NIF_TERM)((PID)->pid))
+# define enif_compare_pids(A, B) (enif_compare((A)->pid,(B)->pid))
+# define enif_select_read(ENV, E, OBJ, PID, MSG, MSG_ENV) \
+ enif_select_x(ENV, E, ERL_NIF_SELECT_READ | ERL_NIF_SELECT_CUSTOM_MSG, \
+ OBJ, PID, MSG, MSG_ENV)
+# define enif_select_write(ENV, E, OBJ, PID, MSG, MSG_ENV) \
+ enif_select_x(ENV, E, ERL_NIF_SELECT_WRITE | ERL_NIF_SELECT_CUSTOM_MSG, \
+ OBJ, PID, MSG, MSG_ENV)
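A hedged sketch of the new convenience macros (fd and rsrc are illustrative; a NULL pid selects the calling process and a NULL msg_env means msg was built in env, as with enif_send): arm a read select that delivers a custom message when the event becomes readable.

    ERL_NIF_TERM msg = enif_make_atom(env, "ready_to_read");
    int rv = enif_select_read(env, fd, rsrc, NULL, msg, NULL);
    if (rv < 0) {
        /* negative result carries ERL_NIF_SELECT_* error flags */
    }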
#if SIZEOF_LONG == 8
# define enif_get_int64 enif_get_long
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 18ed782ae3..4eb6c3e214 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -177,6 +177,7 @@ dist_table_alloc(void *dep_tmpl)
dep->connection_id = 0;
dep->state = ERTS_DE_STATE_IDLE;
dep->flags = 0;
+ dep->opts = 0;
dep->version = 0;
dep->mld = NULL;
@@ -201,6 +202,7 @@ dist_table_alloc(void *dep_tmpl)
dep->send = NULL;
dep->cache = NULL;
dep->transcode_ctx = NULL;
+ dep->sequences = NULL;
/* Link in */
@@ -658,6 +660,7 @@ erts_set_dist_entry_not_connected(DistEntry *dep)
dep->state = ERTS_DE_STATE_IDLE;
dep->flags = 0;
+ dep->opts = 0;
dep->prev = NULL;
dep->cid = NIL;
@@ -801,8 +804,8 @@ node_table_hash(void *venp)
static int
node_table_cmp(void *venp1, void *venp2)
{
- return ((((ErlNode *) venp1)->sysname == ((ErlNode *) venp2)->sysname
- && ((ErlNode *) venp1)->creation == ((ErlNode *) venp2)->creation)
+ return ((((ErlNode *) venp1)->sysname == ((ErlNode *) venp2)->sysname) &&
+ ((((ErlNode *) venp1)->creation == ((ErlNode *) venp2)->creation))
? 0
: 1);
}
@@ -816,11 +819,16 @@ node_table_alloc(void *venp_tmpl)
node_entries++;
- erts_refc_init(&enp->refc, -1);
+ erts_init_node_entry(enp, -1);
enp->creation = ((ErlNode *) venp_tmpl)->creation;
enp->sysname = ((ErlNode *) venp_tmpl)->sysname;
enp->dist_entry = erts_find_or_insert_dist_entry(((ErlNode *) venp_tmpl)->sysname);
+#ifdef ERL_NODE_BOOKKEEP
+ erts_atomic_init_nob(&enp->slot, 0);
+ sys_memzero(enp->books, sizeof(struct erl_node_bookkeeping) * 1024);
+#endif
+
return (void *) enp;
}
@@ -873,7 +881,7 @@ erts_node_table_info(fmtfn_t to, void *to_arg)
}
-ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
+ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation, Eterm book)
{
ErlNode *res;
ErlNode ne;
@@ -883,9 +891,9 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
erts_rwmtx_rlock(&erts_node_table_rwmtx);
res = hash_get(&erts_node_table, (void *) &ne);
if (res && res != erts_this_node) {
- erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_ref_node_entry(res, 0, book);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&res->refc, 1);
+ erts_ref_node_entry(res, 1, THE_NON_VALUE);
}
erts_rwmtx_runlock(&erts_node_table_rwmtx);
if (res)
@@ -895,9 +903,9 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
if (res != erts_this_node) {
- erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_ref_node_entry(res, 0, book);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&res->refc, 1);
+ erts_ref_node_entry(res, 1, THE_NON_VALUE);
}
erts_rwmtx_rwunlock(&erts_node_table_rwmtx);
return res;
@@ -924,6 +932,7 @@ static void try_delete_node(void *venp)
*
* If refc > 0, the entry is in use. Keep the entry.
*/
+ erts_node_bookkeep(enp, THE_NON_VALUE, ERL_NODE_DEC);
refc = erts_refc_dectest(&enp->refc, -1);
if (refc == -1)
(void) hash_erase(&erts_node_table, (void *) enp);
@@ -1021,7 +1030,7 @@ erts_set_this_node(Eterm sysname, Uint creation)
erts_deref_dist_entry(erts_this_dist_entry);
erts_this_node = NULL; /* to make sure refc is bumped for this node */
- erts_this_node = erts_find_or_insert_node(sysname, creation);
+ erts_this_node = erts_find_or_insert_node(sysname, creation, THE_NON_VALUE);
erts_this_dist_entry = erts_this_node->dist_entry;
erts_ref_dist_entry(erts_this_dist_entry);
@@ -1090,7 +1099,7 @@ void erts_init_node_tables(int dd_sec)
node_tmpl.creation = 0;
erts_this_node = hash_put(&erts_node_table, &node_tmpl);
/* +1 for erts_this_node */
- erts_refc_init(&erts_this_node->refc, 1);
+ erts_init_node_entry(erts_this_node, 1);
ASSERT(erts_this_node->dist_entry != NULL);
erts_this_dist_entry = erts_this_node->dist_entry;
@@ -1177,6 +1186,7 @@ static Eterm AM_system;
static Eterm AM_timer;
static Eterm AM_delayed_delete_timer;
static Eterm AM_thread_progress_delete_timer;
+static Eterm AM_sequence;
static Eterm AM_signal;
static void setup_reference_table(void);
@@ -1218,6 +1228,7 @@ typedef struct dist_referrer_ {
int ctrl_ref;
int system_ref;
int signal_ref;
+ int sequence_ref;
Eterm id;
Uint creation;
Uint id_heap[ID_HEAP_SIZE];
@@ -1272,6 +1283,7 @@ erts_get_node_and_dist_references(struct process *proc)
INIT_AM(delayed_delete_timer);
INIT_AM(thread_progress_delete_timer);
INIT_AM(signal);
+ INIT_AM(sequence);
references_atoms_need_init = 0;
}
@@ -1309,8 +1321,9 @@ erts_get_node_and_dist_references(struct process *proc)
#define TIMER_REF 8
#define SYSTEM_REF 9
#define SIGNAL_REF 10
+#define SEQUENCE_REF 11
-#define INC_TAB_SZ 10
+#define INC_TAB_SZ 11
static void
insert_dist_referrer(ReferredDist *referred_dist,
@@ -1344,6 +1357,7 @@ insert_dist_referrer(ReferredDist *referred_dist,
drp->ctrl_ref = 0;
drp->system_ref = 0;
drp->signal_ref = 0;
+ drp->sequence_ref = 0;
}
switch (type) {
@@ -1353,6 +1367,7 @@ insert_dist_referrer(ReferredDist *referred_dist,
case ETS_REF: drp->ets_ref++; break;
case SYSTEM_REF: drp->system_ref++; break;
case SIGNAL_REF: drp->signal_ref++; break;
+ case SEQUENCE_REF: drp->sequence_ref++; break;
default: ASSERT(0);
}
}
@@ -1509,7 +1524,7 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
}
}
else if (IsSendCtxBinary(u.mref->mb)) {
- ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(u.mref->mb);
+ ErtsDSigSendContext* ctx = ERTS_MAGIC_BIN_DATA(u.mref->mb);
if (ctx->deref_dep)
insert_dist_entry(ctx->dep, type, id, 0);
}
@@ -1544,16 +1559,18 @@ static void insert_monitor_data(ErtsMonitor *mon, int type, Eterm id)
mdp->origin.flags |= ERTS_ML_FLG_DBG_VISITED;
}
-static void insert_monitor(ErtsMonitor *mon, void *idp)
+static int insert_monitor(ErtsMonitor *mon, void *idp, Sint reds)
{
Eterm id = *((Eterm *) idp);
insert_monitor_data(mon, MONITOR_REF, id);
+ return 1;
}
-static void clear_visited_monitor(ErtsMonitor *mon, void *p)
+static int clear_visited_monitor(ErtsMonitor *mon, void *p, Sint reds)
{
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
mdp->origin.flags &= ~ERTS_ML_FLG_DBG_VISITED;
+ return 1;
}
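+
+/* Editor's note (not part of the original patch): these walkers now use
+ * the ErtsMonitorFunc/ErtsLinkFunc signature, which takes a reduction
+ * budget and returns the number of reductions consumed so that yielding
+ * foreach implementations can account for work. The debug walkers never
+ * yield, so they simply report a cost of 1. */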
static void
@@ -1581,6 +1598,20 @@ insert_dist_monitors(DistEntry *dep)
}
}
+
+static int
+insert_sequence(ErtsDistExternal *edep, void *arg, Sint reds)
+{
+ insert_dist_entry(edep->dep, SEQUENCE_REF, *(Eterm*)arg, 0);
+ return 1;
+}
+
+static void
+insert_dist_sequences(DistEntry *dep)
+{
+ erts_dist_seq_tree_foreach(dep, insert_sequence, (void *) &dep->sysname);
+}
+
static void
clear_visited_p_monitors(ErtsPTabElementCommon *p)
{
@@ -1621,16 +1652,18 @@ static void insert_link_data(ErtsLink *lnk, int type, Eterm id)
ldp->a.flags |= ERTS_ML_FLG_DBG_VISITED;
}
-static void insert_link(ErtsLink *lnk, void *idp)
+static int insert_link(ErtsLink *lnk, void *idp, Sint reds)
{
Eterm id = *((Eterm *) idp);
insert_link_data(lnk, LINK_REF, id);
+ return 1;
}
-static void clear_visited_link(ErtsLink *lnk, void *p)
+static int clear_visited_link(ErtsLink *lnk, void *p, Sint reds)
{
ErtsLinkData *ldp = erts_link_to_data(lnk);
ldp->a.flags &= ~ERTS_ML_FLG_DBG_VISITED;
+ return 1;
}
static void
@@ -1770,11 +1803,9 @@ insert_message(ErtsMessage *msg, int type, Process *proc)
else if (ERTS_SIG_IS_INTERNAL_MSG(msg))
heap_frag = msg->data.heap_frag;
else {
- if (msg->data.dist_ext->dep)
- insert_dist_entry(msg->data.dist_ext->dep,
- type, proc->common.id, 0);
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ heap_frag = msg->data.heap_frag;
+ insert_dist_entry(erts_get_dist_ext(heap_frag)->dep,
+ type, proc->common.id, 0);
}
}
while (heap_frag) {
@@ -1798,24 +1829,115 @@ insert_sig_offheap(ErlOffHeap *ohp, void *arg)
insert_offheap(ohp, SIGNAL_REF, proc->common.id);
}
-static void
-insert_sig_monitor(ErtsMonitor *mon, void *arg)
+static int
+insert_sig_monitor(ErtsMonitor *mon, void *arg, Sint reds)
{
Process *proc = arg;
insert_monitor_data(mon, SIGNAL_REF, proc->common.id);
+ return 1;
}
-static void
-insert_sig_link(ErtsLink *lnk, void *arg)
+static int
+insert_sig_link(ErtsLink *lnk, void *arg, Sint reds)
{
Process *proc = arg;
insert_link_data(lnk, SIGNAL_REF, proc->common.id);
+ return 1;
}
static void
-setup_reference_table(void)
+insert_sig_ext(ErtsDistExternal *edep, void *arg)
{
+ Process *proc = arg;
+ insert_dist_entry(edep->dep, SIGNAL_REF, proc->common.id, 0);
+}
+
+static void
+insert_process(Process *proc)
+{
+ int mli;
+ ErtsMessage *msg_list[] = {proc->msg_frag};
ErlHeapFragment *hfp;
+
+ /* Insert Heap */
+ insert_offheap(&(proc->off_heap),
+ HEAP_REF,
+ proc->common.id);
+ /* Insert heap fragments buffers */
+ for(hfp = proc->mbuf; hfp; hfp = hfp->next)
+ insert_offheap(&(hfp->off_heap),
+ HEAP_REF,
+ proc->common.id);
+
+ /* Insert msg buffers */
+ for (mli = 0; mli < sizeof(msg_list)/sizeof(msg_list[0]); mli++) {
+ ErtsMessage *msg;
+ for (msg = msg_list[mli]; msg; msg = msg->next)
+ insert_message(msg, HEAP_REF, proc);
+ }
+
+ /* Insert signal queue */
+ erts_proc_sig_debug_foreach_sig(proc,
+ insert_sig_msg,
+ insert_sig_offheap,
+ insert_sig_monitor,
+ insert_sig_link,
+ insert_sig_ext,
+ (void *) proc);
+
+ /* If the process is FREE, the proc->common field has been
+ re-used by the ptab delete, so we cannot trust it. */
+ if (!(erts_atomic32_read_nob(&proc->state) & ERTS_PSFLG_FREE)) {
+ /* Insert links */
+ insert_p_links(&proc->common);
+
+ /* Insert monitors */
+ insert_p_monitors(&proc->common);
+ }
+
+ {
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
+ if (dep)
+ insert_dist_entry(dep,
+ CTRL_REF,
+ proc->common.id,
+ 0);
+ }
+}
+
+static void
+insert_dist_suspended_procs(DistEntry *dep)
+{
+ ErtsProcList *plist = erts_proclist_peek_first(dep->suspended);
+ while (plist) {
+ if (is_not_immed(plist->u.pid))
+ insert_process(plist->u.p);
+ plist = erts_proclist_peek_next(dep->suspended, plist);
+ }
+}
+
+#ifdef ERL_NODE_BOOKKEEP
+void
+erts_node_bookkeep(ErlNode *np, Eterm term, int what)
+{
+    erts_aint_t slot;
+    ErtsSchedulerData *esdp = erts_get_scheduler_data();
+    Eterm who = THE_NON_VALUE;
+    ASSERT(np);
+    slot = (erts_atomic_inc_read_nob(&np->slot) - 1) % 1024;
+ np->books[slot].what = what;
+ np->books[slot].term = term;
+ if (esdp->current_process) {
+ who = esdp->current_process->common.id;
+ } else if (esdp->current_port) {
+ who = esdp->current_port->common.id;
+ }
+ np->books[slot].who = who;
+}
+#endif
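+
+/* Editor's sketch (illustrative only): the ring-buffer indexing above
+ * relies on erts_atomic_inc_read_nob() returning the value after the
+ * increment. With the slot counter starting at 0, the first call claims
+ * (1 - 1) % 1024 == books[0], the 1025th call wraps back to books[0],
+ * and concurrent writers each obtain a distinct slot without locking. */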
+
+static void
+setup_reference_table(void)
+{
DistEntry *dep;
HashInfo hi;
int i, max;
@@ -1865,52 +1987,10 @@ setup_reference_table(void)
/* Insert all processes */
for (i = 0; i < max; i++) {
Process *proc = erts_pix2proc(i);
- if (proc) {
- int mli;
- ErtsMessage *msg_list[] = {proc->msg_frag};
-
- /* Insert Heap */
- insert_offheap(&(proc->off_heap),
- HEAP_REF,
- proc->common.id);
- /* Insert heap fragments buffers */
- for(hfp = proc->mbuf; hfp; hfp = hfp->next)
- insert_offheap(&(hfp->off_heap),
- HEAP_REF,
- proc->common.id);
-
- /* Insert msg buffers */
- for (mli = 0; mli < sizeof(msg_list)/sizeof(msg_list[0]); mli++) {
- ErtsMessage *msg;
- for (msg = msg_list[mli]; msg; msg = msg->next)
- insert_message(msg, HEAP_REF, proc);
- }
-
- /* Insert signal queue */
- erts_proc_sig_debug_foreach_sig(proc,
- insert_sig_msg,
- insert_sig_offheap,
- insert_sig_monitor,
- insert_sig_link,
- (void *) proc);
-
- /* Insert links */
- insert_p_links(&proc->common);
-
- /* Insert monitors */
- insert_p_monitors(&proc->common);
-
- {
- DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
- if (dep)
- insert_dist_entry(dep,
- CTRL_REF,
- proc->common.id,
- 0);
- }
- }
+ if (proc)
+ insert_process(proc);
}
-
+
erts_foreach_sys_msg_in_q(insert_sys_msg);
/* Insert all ports */
@@ -1983,16 +2063,22 @@ setup_reference_table(void)
for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
insert_dist_links(dep);
insert_dist_monitors(dep);
+ insert_dist_sequences(dep);
+ insert_dist_suspended_procs(dep);
}
for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
insert_dist_links(dep);
insert_dist_monitors(dep);
+ insert_dist_sequences(dep);
+ insert_dist_suspended_procs(dep);
}
for(dep = erts_pending_dist_entries; dep; dep = dep->next) {
insert_dist_links(dep);
insert_dist_monitors(dep);
+ insert_dist_sequences(dep);
+ insert_dist_suspended_procs(dep);
}
/* Not connected dist entries should not have any links,
@@ -2000,6 +2086,8 @@ setup_reference_table(void)
for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
insert_dist_links(dep);
insert_dist_monitors(dep);
+ insert_dist_sequences(dep);
+ insert_dist_suspended_procs(dep);
}
/* Insert all ets tables */
@@ -2173,6 +2261,10 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
tup = MK_2TUP(AM_system, MK_UINT(drp->system_ref));
drl = MK_CONS(tup, drl);
}
+ if(drp->sequence_ref) {
+ tup = MK_2TUP(AM_sequence, MK_UINT(drp->sequence_ref));
+ drl = MK_CONS(tup, drl);
+ }
if(drp->signal_ref) {
tup = MK_2TUP(AM_signal, MK_UINT(drp->signal_ref));
drl = MK_CONS(tup, drl);
@@ -2202,7 +2294,7 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
tup = MK_2TUP(AM_ets, drp->id);
}
else {
- ASSERT(!drp->ctrl_ref && drp->node_ref && !drp->signal_ref);
+ ASSERT(!drp->ctrl_ref && (drp->node_ref || drp->sequence_ref) && !drp->signal_ref);
ASSERT(is_atom(drp->id));
tup = MK_2TUP(drp->id, MK_UINT(drp->creation));
tup = MK_2TUP(AM_node, tup);
@@ -2247,6 +2339,12 @@ static void noop_sig_offheap(ErlOffHeap *oh, void *arg)
}
+static void noop_sig_ext(ErtsDistExternal *ext, void *arg)
+{
+
+}
+
+
static void
delete_reference_table(void)
{
@@ -2297,6 +2395,7 @@ delete_reference_table(void)
noop_sig_offheap,
clear_visited_monitor,
clear_visited_link,
+ noop_sig_ext,
(void *) proc);
}
}
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index c44f1f8991..aa8af12555 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -95,14 +95,26 @@ enum dist_entry_state {
struct ErtsDistOutputBuf_ {
#ifdef DEBUG
Uint dbg_pattern;
+ byte *ext_startp;
byte *alloc_endp;
#endif
ErtsDistOutputBuf *next;
- Uint hopefull_flags;
+ Binary *bin;
+    /* Pointers to the distribution header;
+       if hdrp is NULL the distribution header is in extp */
+ byte *hdrp;
+ byte *hdr_endp;
+ /* Pointers to the ctl + payload */
byte *extp;
byte *ext_endp;
+ /* Start of payload and hopefull_flags, used by transcode */
+ Uint hopefull_flags;
byte *msg_start;
- byte data[1];
+    /* Start of the ext buffer; this is not always the same as extp,
+       as the atom cache handling can use less than the allotted buffer.
+       This value is needed to calculate the size of this output buffer. */
+ byte *ext_start;
+
};
typedef struct {
@@ -137,6 +149,7 @@ struct dist_entry_ {
enum dist_entry_state state;
Uint32 flags; /* Distribution flags, like hidden,
atom cache etc. */
+ Uint32 opts;
unsigned long version; /* Protocol version */
ErtsMonLnkDist *mld; /* Monitors and links */
@@ -161,7 +174,46 @@ struct dist_entry_ {
ErtsThrPrgrLaterOp later_op;
struct transcode_context* transcode_ctx;
+
+ struct dist_sequences *sequences; /* Ongoing distribution sequences */
+};
+
+/*
+#define ERL_NODE_BOOKKEEP
+ * Bookkeeping of ErlNode inc and dec operations to help debug refc problems.
+ * This is best used together with cerl -rr. Type the following into gdb:
+ * gdb:
+set pagination off
+set $i = 0
+set $node = referred_nodes[$node_ix].node
+while $i < $node->slot.counter
+ printf "%p: ", $node->books[$i].term
+ etp-1 $node->books[$i].who
+ printf " "
+ p $node->books[$i].what
+ set $i++
+end
+
+ * Then save that into a file called test.txt and run the below in
+ * an erlang shell in order to get all inc/dec that do not have a
+ * match.
+
+f(), {ok, B} = file:read_file("test.txt").
+Vs = [begin [Val, _, _, _, What] = All = string:lexemes(Ln, " "),{Val,What,All} end || Ln <- string:lexemes(B,"\n")].
+Accs = lists:foldl(fun({V,<<"ERL_NODE_INC">>,_},M) -> Val = maps:get(V,M,0), M#{ V => Val + 1 }; ({V,<<"ERL_NODE_DEC">>,_},M) -> Val = maps:get(V,M,0), M#{ V => Val - 1 } end, #{}, Vs).
+lists:usort(lists:filter(fun({V,N}) -> N /= 0 end, maps:to_list(Accs))).
+
+ * There are bound to be bugs in the instrumentation code, but
+ * at least this is a place to start when hunting refc bugs.
+ *
+ */
+#ifdef ERL_NODE_BOOKKEEP
+struct erl_node_bookkeeping {
+ Eterm who;
+ Eterm term;
+ enum { ERL_NODE_INC, ERL_NODE_DEC } what;
};
+#endif
typedef struct erl_node_ {
HashBucket hash_bucket; /* Hash bucket */
@@ -169,6 +221,10 @@ typedef struct erl_node_ {
Eterm sysname; /* name@host atom for efficiency */
Uint32 creation; /* Creation */
DistEntry *dist_entry; /* Corresponding dist entry */
+#ifdef ERL_NODE_BOOKKEEP
+ struct erl_node_bookkeeping books[1024];
+ erts_atomic_t slot;
+#endif
} ErlNode;
@@ -201,7 +257,7 @@ void erts_dist_table_info(fmtfn_t, void *);
void erts_set_dist_entry_not_connected(DistEntry *);
void erts_set_dist_entry_pending(DistEntry *);
void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint);
-ErlNode *erts_find_or_insert_node(Eterm, Uint32);
+ErlNode *erts_find_or_insert_node(Eterm, Uint32, Eterm);
void erts_schedule_delete_node(ErlNode *);
void erts_set_this_node(Eterm, Uint);
Uint erts_node_table_size(void);
@@ -219,18 +275,38 @@ DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle, Uint32* connection_id);
Eterm erts_build_dhandle(Eterm **hpp, ErlOffHeap*, DistEntry*, Uint32 conn_id);
Eterm erts_make_dhandle(Process *c_p, DistEntry*, Uint32 conn_id);
-ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
+ERTS_GLB_INLINE void erts_init_node_entry(ErlNode *np, erts_aint_t val);
+ERTS_GLB_INLINE erts_aint_t erts_ref_node_entry(ErlNode *np, int min_val, Eterm term);
+ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np, Eterm term);
ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep);
ERTS_GLB_INLINE void erts_de_runlock(DistEntry *dep);
ERTS_GLB_INLINE void erts_de_rwlock(DistEntry *dep);
ERTS_GLB_INLINE void erts_de_rwunlock(DistEntry *dep);
+#ifdef ERL_NODE_BOOKKEEP
+void erts_node_bookkeep(ErlNode *, Eterm, int);
+#else
+#define erts_node_bookkeep(...)
+#endif
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_deref_node_entry(ErlNode *np)
+erts_init_node_entry(ErlNode *np, erts_aint_t val)
+{
+ erts_refc_init(&np->refc, val);
+}
+
+ERTS_GLB_INLINE erts_aint_t
+erts_ref_node_entry(ErlNode *np, int min_val, Eterm term)
+{
+ erts_node_bookkeep(np, term, ERL_NODE_INC);
+ return erts_refc_inctest(&np->refc, min_val);
+}
+
+ERTS_GLB_INLINE void
+erts_deref_node_entry(ErlNode *np, Eterm term)
{
- ASSERT(np);
+ erts_node_bookkeep(np, term, ERL_NODE_DEC);
if (erts_refc_dectest(&np->refc, 0) == 0)
erts_schedule_delete_node(np);
}
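+
+/* Editor's sketch (illustrative, not part of the patch): callers now
+ * thread the referring term through so that ERL_NODE_BOOKKEEP can
+ * record it. For a hypothetical ExternalThing `etp`, the calls would
+ * pair up like this:
+ *
+ *     erts_ref_node_entry(etp->node, 2, make_boxed(&etp->header));
+ *     ...
+ *     erts_deref_node_entry(etp->node, make_boxed(&etp->header));
+ *
+ * The Eterm argument is consumed only by erts_node_bookkeep() and
+ * expands to nothing when ERL_NODE_BOOKKEEP is not defined. */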
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index c8f2e88127..30a7875387 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -2094,7 +2094,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", pp->common.id);
while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->pid);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->u.pid);
DTRACE2(process_port_unblocked, pid_str, port_str);
}
}
diff --git a/erts/emulator/beam/erl_posix_str.c b/erts/emulator/beam/erl_posix_str.c
index 7b3e640d3f..5b515d6e78 100644
--- a/erts/emulator/beam/erl_posix_str.c
+++ b/erts/emulator/beam/erl_posix_str.c
@@ -171,6 +171,9 @@ erl_errno_id(error)
#if defined(EIDRM) && (!defined(EINPROGRESS) || (EIDRM != EINPROGRESS))
case EIDRM: return "eidrm";
#endif
+#ifdef EILSEQ
+ case EILSEQ: return "eilseq";
+#endif
#ifdef EINIT
case EINIT: return "einit";
#endif
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 9cfb7fc681..2e33a8a782 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -487,6 +487,8 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
PRINT_UWORD(res, fn, arg, 'u', 0, 1, octet);
++bytep;
--bytesize;
+ if ((*dcount)-- <= 0)
+ goto L_done;
}
if (bitsize) {
Uint bits = bitoffs + bitsize;
@@ -521,6 +523,8 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
PRINT_CHAR(res, fn, arg, octet);
++bytep;
--bytesize;
+ if ((*dcount)-- <= 0)
+ goto L_done;
}
PRINT_STRING(res, fn, arg, "\">>");
}
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index d475a0a634..f58a606d57 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -36,6 +36,7 @@
#include "erl_port_task.h"
#include "erl_trace.h"
#include "beam_bp.h"
+#include "erl_binary.h"
#include "big.h"
#include "erl_gc.h"
#include "bif.h"
@@ -80,6 +81,11 @@
#define ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO \
ERTS_SIG_Q_TYPE_MAX
+#define ERTS_SIG_IS_GEN_EXIT(sig) \
+ (ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag) == ERTS_SIG_Q_TYPE_GEN_EXIT)
+#define ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig) \
+ (ASSERT(ERTS_SIG_IS_GEN_EXIT(sig)),is_non_value(get_exit_signal_data(sig)->reason))
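+
+/* Editor's note (not part of the original patch): a gen-exit signal
+ * whose reason is still in external term format carries THE_NON_VALUE
+ * in xsigd->reason, which is what the test above keys on;
+ * erts_proc_sig_decode_dist() later replaces it with the decoded term. */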
+
Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler);
Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_high);
Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_max);
@@ -259,7 +265,7 @@ destroy_dist_proc_demonitor(ErtsSigDistProcDemonitor *dmon)
Eterm ref = dmon->ref;
if (is_external(ref)) {
ExternalThing *etp = external_thing_ptr(ref);
- erts_deref_node_entry(etp->node);
+ erts_deref_node_entry(etp->node, ref);
}
erts_free(ERTS_ALC_T_DIST_DEMONITOR, dmon);
}
@@ -294,7 +300,8 @@ destroy_sig_dist_link_op(ErtsSigDistLinkOp *sdlnk)
{
ASSERT(is_external_pid(sdlnk->remote));
ASSERT(boxed_val(sdlnk->remote) == &sdlnk->heap[0]);
- erts_deref_node_entry(((ExternalThing *) &sdlnk->heap[0])->node);
+ erts_deref_node_entry(((ExternalThing *) &sdlnk->heap[0])->node,
+ make_boxed(&sdlnk->heap[0]));
erts_free(ERTS_ALC_T_SIG_DATA, sdlnk);
}
@@ -604,7 +611,8 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
#endif
return 1;
}
- ASSERT(esdp->pending_signal.dbg_from == esdp->current_process);
+ ASSERT(esdp->pending_signal.dbg_from == esdp->current_process ||
+ esdp->pending_signal.dbg_from == esdp->free_process);
if (pend_sig != sig) {
/* Switch them and send previously pending signal instead */
Eterm pend_to = esdp->pending_signal.to;
@@ -936,29 +944,54 @@ erts_proc_sig_privqs_len(Process *c_p)
return proc_sig_privqs_len(c_p, 0);
}
+ErtsDistExternal *
+erts_proc_sig_get_external(ErtsMessage *msgp)
+{
+ if (ERTS_SIG_IS_EXTERNAL_MSG(msgp)) {
+ return erts_get_dist_ext(msgp->data.heap_frag);
+ } else if (ERTS_SIG_IS_NON_MSG(msgp) &&
+ ERTS_SIG_IS_GEN_EXIT(msgp) &&
+ ERTS_SIG_IS_GEN_EXIT_EXTERNAL(msgp)) {
+ ErtsDistExternal *edep;
+ ErtsExitSignalData *xsigd = get_exit_signal_data(msgp);
+ ASSERT(ERTS_PROC_SIG_TYPE(((ErtsSignal *) msgp)->common.tag) == ERTS_SIG_Q_TYPE_GEN_EXIT);
+ ASSERT(is_non_value(xsigd->reason));
+ if (msgp->hfrag.next == NULL)
+ edep = (ErtsDistExternal*)(xsigd + 1);
+ else
+ edep = erts_get_dist_ext(msgp->hfrag.next);
+ return edep;
+ }
+ return NULL;
+}
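+
+/* Editor's sketch (illustrative, not part of the patch): a caller can
+ * test the result to see whether a signal still carries undecoded
+ * external data:
+ *
+ *     if (erts_proc_sig_get_external(msgp) != NULL) {
+ *         (payload not decoded yet; see erts_proc_sig_decode_dist())
+ *     }
+ */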
+
static void do_seq_trace_output(Eterm to, Eterm token, Eterm msg);
static void
send_gen_exit_signal(Process *c_p, Eterm from_tag,
Eterm from, Eterm to,
- Sint16 op, Eterm reason, Eterm ref,
- Eterm token, int normal_kills)
+ Sint16 op, Eterm reason, ErtsDistExternal *dist_ext,
+ ErlHeapFragment *dist_ext_hfrag,
+ Eterm ref, Eterm token, int normal_kills)
{
ErtsExitSignalData *xsigd;
Eterm *hp, *start_hp, s_reason, s_ref, s_message, s_token, s_from;
ErtsMessage *mp;
ErlHeapFragment *hfrag;
ErlOffHeap *ohp;
- Uint hsz, from_sz, reason_sz, ref_sz, token_sz;
+ Uint hsz, from_sz, reason_sz, ref_sz, token_sz, dist_ext_sz;
int seq_trace;
#ifdef USE_VM_PROBES
Eterm s_utag, utag;
Uint utag_sz;
#endif
+ ASSERT((is_value(reason) && dist_ext == NULL) ||
+ (is_non_value(reason) && dist_ext != NULL));
+
ASSERT(is_immed(from_tag));
- hsz = sizeof(ErtsExitSignalData)/sizeof(Uint);
+ hsz = sizeof(ErtsExitSignalData)/sizeof(Eterm);
seq_trace = c_p && have_seqtrace(token);
if (seq_trace)
@@ -977,33 +1010,44 @@ send_gen_exit_signal(Process *c_p, Eterm from_tag,
hsz += utag_sz;
#endif
- token_sz = is_immed(token) ? 0 : size_object(token);
+ token_sz = size_object(token);
hsz += token_sz;
- from_sz = is_immed(from) ? 0 : size_object(from);
+ from_sz = size_object(from);
hsz += from_sz;
- reason_sz = is_immed(reason) ? 0 : size_object(reason);
- hsz += reason_sz;
+ ref_sz = size_object(ref);
+ hsz += ref_sz;
- switch (op) {
- case ERTS_SIG_Q_OP_EXIT:
- case ERTS_SIG_Q_OP_EXIT_LINKED: {
- /* {'EXIT', From, Reason} */
- hsz += 4; /* 3-tuple */
- ref_sz = 0;
- break;
- }
- case ERTS_SIG_Q_OP_MONITOR_DOWN: {
- /* {'DOWN', Ref, process, From, Reason} */
- hsz += 6; /* 5-tuple */
- ref_sz = NC_HEAP_SIZE(ref);
- hsz += ref_sz;
- break;
- }
- default:
- ERTS_INTERNAL_ERROR("Invalid exit signal op");
- break;
+ reason_sz = 0; /* Set to silence gcc warning */
+
+    /* The reason was part of the control message,
+       just copy it into the xsigd */
+ if (is_value(reason)) {
+ reason_sz = size_object(reason);
+ hsz += reason_sz;
+
+ switch (op) {
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED: {
+ /* {'EXIT', From, Reason} */
+ hsz += 4; /* 3-tuple */
+ break;
+ }
+ case ERTS_SIG_Q_OP_MONITOR_DOWN: {
+ /* {'DOWN', Ref, process, From, Reason} */
+ hsz += 6; /* 5-tuple */
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid exit signal op");
+ break;
+ }
+ } else if (dist_ext != NULL && dist_ext_hfrag == NULL) {
+        /* The message was not fragmented, so we need to create space
+ for a single dist_ext element */
+ dist_ext_sz = erts_dist_ext_size(dist_ext) / sizeof(Eterm);
+ hsz += dist_ext_sz;
}
/*
@@ -1015,35 +1059,33 @@ send_gen_exit_signal(Process *c_p, Eterm from_tag,
ohp = &hfrag->off_heap;
start_hp = hp;
- s_token = (is_immed(token)
- ? token
- : copy_struct(token, token_sz, &hp, ohp));
+ s_token = copy_struct(token, token_sz, &hp, ohp);
+ s_from = copy_struct(from, from_sz, &hp, ohp);
+ s_ref = copy_struct(ref, ref_sz, &hp, ohp);
- s_reason = (is_immed(reason)
- ? reason
- : copy_struct(reason, reason_sz, &hp, ohp));
+ if (is_value(reason)) {
+ s_reason = copy_struct(reason, reason_sz, &hp, ohp);
- s_from = (is_immed(from)
- ? from
- : copy_struct(from, from_sz, &hp, ohp));
-
- if (!ref_sz)
- s_ref = NIL;
- else
- s_ref = STORE_NC(&hp, ohp, ref);
-
- switch (op) {
- case ERTS_SIG_Q_OP_EXIT:
- case ERTS_SIG_Q_OP_EXIT_LINKED:
- /* {'EXIT', From, Reason} */
- s_message = TUPLE3(hp, am_EXIT, s_from, s_reason);
- hp += 4;
- break;
- case ERTS_SIG_Q_OP_MONITOR_DOWN:
- /* {'DOWN', Ref, process, From, Reason} */
- s_message = TUPLE5(hp, am_DOWN, s_ref, am_process, s_from, s_reason);
- hp += 6;
- break;
+ switch (op) {
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ /* {'EXIT', From, Reason} */
+ s_message = TUPLE3(hp, am_EXIT, s_from, s_reason);
+ hp += 4;
+ break;
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ /* {'DOWN', Ref, process, From, Reason} */
+ s_message = TUPLE5(hp, am_DOWN, s_ref, am_process, s_from, s_reason);
+ hp += 6;
+ break;
+ default:
+ /* This cannot happen, used to silence gcc warning */
+ s_message = THE_NON_VALUE;
+ break;
+ }
+ } else {
+ s_message = THE_NON_VALUE;
+ s_reason = THE_NON_VALUE;
}
#ifdef USE_VM_PROBES
@@ -1061,11 +1103,13 @@ send_gen_exit_signal(Process *c_p, Eterm from_tag,
hfrag->used_size = hp - start_hp;
- xsigd = (ErtsExitSignalData *) (char *) hp;
+ xsigd = (ErtsExitSignalData *) hp;
xsigd->message = s_message;
xsigd->from = s_from;
xsigd->reason = s_reason;
+ hfrag->next = dist_ext_hfrag;
+
if (is_nil(s_ref))
xsigd->u.normal_kills = normal_kills;
else {
@@ -1073,6 +1117,15 @@ send_gen_exit_signal(Process *c_p, Eterm from_tag,
xsigd->u.ref = s_ref;
}
+ hp += sizeof(ErtsExitSignalData)/sizeof(Eterm);
+
+ if (dist_ext != NULL && dist_ext_hfrag == NULL && is_non_value(reason)) {
+ erts_make_dist_ext_copy(dist_ext, (ErtsDistExternal *) hp);
+ hp += dist_ext_sz;
+ }
+
+ ASSERT(hp == mp->hfrag.mem + mp->hfrag.alloc_size);
+
if (seq_trace)
do_seq_trace_output(to, s_token, s_message);
@@ -1205,7 +1258,19 @@ erts_proc_sig_send_exit(Process *c_p, Eterm from, Eterm to,
from_tag = dep->sysname;
}
send_gen_exit_signal(c_p, from_tag, from, to, ERTS_SIG_Q_OP_EXIT,
- reason, NIL, token, normal_kills);
+ reason, NULL, NULL, NIL, token, normal_kills);
+}
+
+void
+erts_proc_sig_send_dist_exit(DistEntry *dep,
+ Eterm from, Eterm to,
+ ErtsDistExternal *dist_ext,
+ ErlHeapFragment *hfrag,
+ Eterm reason, Eterm token)
+{
+ send_gen_exit_signal(NULL, dep->sysname, from, to, ERTS_SIG_Q_OP_EXIT,
+ reason, dist_ext, hfrag, NIL, token, 0);
+
}
void
@@ -1219,7 +1284,7 @@ erts_proc_sig_send_link_exit(Process *c_p, Eterm from, ErtsLink *lnk,
if (is_not_immed(reason) || is_not_nil(token)) {
ASSERT(is_internal_pid(from) || is_internal_port(from));
send_gen_exit_signal(c_p, from, from, to, ERTS_SIG_Q_OP_EXIT_LINKED,
- reason, NIL, token, 0);
+ reason, NULL, NULL, NIL, token, 0);
}
else {
/* Pass signal using old link structure... */
@@ -1274,10 +1339,13 @@ erts_proc_sig_send_unlink(Process *c_p, ErtsLink *lnk)
void
erts_proc_sig_send_dist_link_exit(DistEntry *dep,
Eterm from, Eterm to,
+ ErtsDistExternal *dist_ext,
+ ErlHeapFragment *hfrag,
Eterm reason, Eterm token)
{
send_gen_exit_signal(NULL, dep->sysname, from, to, ERTS_SIG_Q_OP_EXIT_LINKED,
- reason, NIL, token, 0);
+ reason, dist_ext, hfrag, NIL, token, 0);
+
}
void
@@ -1299,16 +1367,18 @@ erts_proc_sig_send_dist_unlink(DistEntry *dep, Eterm from, Eterm to)
void
erts_proc_sig_send_dist_monitor_down(DistEntry *dep, Eterm ref,
Eterm from, Eterm to,
+ ErtsDistExternal *dist_ext,
+ ErlHeapFragment *hfrag,
Eterm reason)
{
Eterm monitored, heap[3];
- if (is_atom(from))
+ if (is_atom(from))
monitored = TUPLE2(&heap[0], from, dep->sysname);
else
monitored = from;
send_gen_exit_signal(NULL, dep->sysname, monitored,
to, ERTS_SIG_Q_OP_MONITOR_DOWN,
- reason, ref, NIL, 0);
+ reason, dist_ext, hfrag, ref, NIL, 0);
}
void
@@ -1376,10 +1446,10 @@ erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason)
|| is_internal_pid(from_tag)
|| is_atom(from_tag));
monitored = TUPLE2(&heap[0], name, node);
- }
+ }
send_gen_exit_signal(NULL, from_tag, monitored,
to, ERTS_SIG_Q_OP_MONITOR_DOWN,
- reason, mdp->ref, NIL, 0);
+ reason, NULL, NULL, mdp->ref, NIL, 0);
}
erts_monitor_release(mon);
}
@@ -2037,7 +2107,6 @@ handle_exit_signal(Process *c_p, ErtsSigRecvTracing *tracing,
if (type == ERTS_SIG_Q_TYPE_GEN_EXIT) {
xsigd = get_exit_signal_data(sig);
from = xsigd->from;
- reason = xsigd->reason;
if (op != ERTS_SIG_Q_OP_EXIT_LINKED)
ignore = 0;
else {
@@ -2062,6 +2131,18 @@ handle_exit_signal(Process *c_p, ErtsSigRecvTracing *tracing,
}
}
+    /* This GEN_EXIT was received from another node; decode the exit reason */
+ if (ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
+ erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN, sig, 1);
+
+ reason = xsigd->reason;
+
+ if (is_non_value(reason)) {
+ /* Bad distribution message; remove it from queue... */
+ ignore = !0;
+ destroy = !0;
+ }
+
if (!ignore) {
if ((op != ERTS_SIG_Q_OP_EXIT || reason != am_kill)
@@ -2929,6 +3010,104 @@ handle_sync_suspend(Process *c_p, ErtsMessage *mp)
}
}
+int
+erts_proc_sig_decode_dist(Process *proc, ErtsProcLocks proc_locks,
+ ErtsMessage *msgp, int force_off_heap)
+{
+ ErtsHeapFactory factory;
+ ErlHeapFragment *hfrag;
+ Eterm msg;
+ Sint need;
+ ErtsDistExternal *edep;
+ ErtsExitSignalData *xsigd = NULL;
+
+ edep = erts_proc_sig_get_external(msgp);
+ if (!ERTS_SIG_IS_EXTERNAL_MSG(msgp))
+ xsigd = get_exit_signal_data(msgp);
+
+ if (edep->heap_size >= 0)
+ need = edep->heap_size;
+ else {
+ need = erts_decode_dist_ext_size(edep, 1);
+ if (need < 0) {
+ /* bad signal; remove it... */
+ return 0;
+ }
+
+ edep->heap_size = need;
+ }
+
+ if (ERTS_SIG_IS_NON_MSG(msgp)) {
+ switch (ERTS_PROC_SIG_OP(ERL_MESSAGE_TERM(msgp))) {
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ /* {'EXIT', From, Reason} */
+ need += 4;
+ break;
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ /* {'DOWN', Ref, process, From, Reason} */
+ need += 6; /* 5-tuple */
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid exit signal op");
+ break;
+ }
+ }
+
+ hfrag = new_message_buffer(need);
+ erts_factory_heap_frag_init(&factory, hfrag);
+
+ ASSERT(edep->heap_size >= 0);
+
+ msg = erts_decode_dist_ext(&factory, edep, 1);
+
+ if (is_non_value(msg)) {
+ erts_factory_undo(&factory);
+ return 0;
+ }
+
+ if (ERTS_SIG_IS_MSG(msgp)) {
+ ERL_MESSAGE_TERM(msgp) = msg;
+ if (msgp->data.heap_frag == &msgp->hfrag)
+ msgp->data.heap_frag = ERTS_MSG_COMBINED_HFRAG;
+ } else {
+ switch (ERTS_PROC_SIG_OP(ERL_MESSAGE_TERM(msgp))) {
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ /* {'EXIT', From, Reason} */
+ erts_reserve_heap(&factory, 4);
+ xsigd->message = TUPLE3(factory.hp, am_EXIT, xsigd->from, msg);
+ factory.hp += 4;
+ break;
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ /* {'DOWN', Ref, process, From, Reason} */
+ erts_reserve_heap(&factory, 6);
+ xsigd->message = TUPLE5(factory.hp, am_DOWN, xsigd->u.ref, am_process, xsigd->from, msg);
+ factory.hp += 6;
+ break;
+ }
+ xsigd->reason = msg;
+ }
+
+ erts_free_dist_ext_copy(edep);
+
+ erts_factory_close(&factory);
+
+ hfrag = factory.heap_frags;
+ while (hfrag->next)
+ hfrag = hfrag->next;
+
+ if (ERTS_SIG_IS_MSG(msgp) && msgp->data.heap_frag != ERTS_MSG_COMBINED_HFRAG) {
+ hfrag->next = msgp->data.heap_frag;
+ msgp->data.heap_frag = factory.heap_frags;
+ } else {
+ hfrag->next = msgp->hfrag.next;
+ msgp->hfrag.next = factory.heap_frags;
+ }
+
+ return 1;
+}
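+
+/* Editor's note (not part of the original patch): the splicing above
+ * links the tail of the decode factory's heap-fragment chain in front
+ * of the signal's existing fragments, so the decoded term and the
+ * original control message stay reachable from a single chain and are
+ * released together by erts_cleanup_messages(). */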
+
void
erts_proc_sig_handle_pending_suspend(Process *c_p)
{
@@ -3045,7 +3224,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
ASSERT(ERTS_SIG_IS_NON_MSG(sig));
tag = ((ErtsSignal *) sig)->common.tag;
-
+
switch (ERTS_PROC_SIG_OP(tag)) {
case ERTS_SIG_Q_OP_EXIT:
@@ -3091,6 +3270,12 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
break;
case ERTS_SIG_Q_TYPE_GEN_EXIT:
xsigd = get_exit_signal_data(sig);
+
+            /* This GEN_EXIT was received from another node; decode the exit reason */
+ if (ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
+ if (!erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN, sig, 1))
+ break; /* Decode failed, just remove signal */
+
omon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p),
xsigd->u.ref);
if (omon) {
@@ -3596,9 +3781,10 @@ stretch_limit(Process *c_p, ErtsSigRecvTracing *tp,
int
-erts_proc_sig_handle_exit(Process *c_p, int *redsp)
+erts_proc_sig_handle_exit(Process *c_p, Sint *redsp)
{
- int cnt, limit;
+ int cnt;
+ Sint limit;
ErtsMessage *sig, ***next_nm_sig;
ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
@@ -3677,9 +3863,9 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
break;
case ERTS_SIG_Q_OP_MONITOR: {
- ErtsProcExitContext pectxt = {c_p, am_noproc};
+ ErtsProcExitContext pectxt = {c_p, am_noproc, NULL, NULL, NIL};
erts_proc_exit_handle_monitor((ErtsMonitor *) sig,
- (void *) &pectxt);
+ (void *) &pectxt, -1);
cnt += 4;
break;
}
@@ -3693,7 +3879,7 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
case ERTS_SIG_Q_OP_LINK: {
ErtsProcExitContext pectxt = {c_p, am_noproc};
- erts_proc_exit_handle_link((ErtsLink *) sig, (void *) &pectxt);
+ erts_proc_exit_handle_link((ErtsLink *) sig, (void *) &pectxt, -1);
break;
}
@@ -4202,11 +4388,13 @@ handle_msg_tracing(Process *c_p, ErtsSigRecvTracing *tracing,
return -1; /* Yield... */
}
if (ERTS_SIG_IS_EXTERNAL_MSG(sig)) {
- cnt++;
- if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN,
- sig, 0)) {
+ cnt += 50; /* Decode is expensive... */
+ if (!erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN,
+ sig, 0)) {
/* Bad dist message; remove it... */
remove_mq_m_sig(c_p, sig, next_sig, next_nm_sig);
+ sig->next = NULL;
+ erts_cleanup_messages(sig);
sig = *next_sig;
continue;
}
@@ -4278,18 +4466,12 @@ erts_proc_sig_prep_msgq_for_inspection(Process *c_p,
if (ERTS_SIG_IS_EXTERNAL_MSG(mp)) {
/* decode it... */
- if (mp->data.attached)
- erts_decode_dist_message(rp, rp_locks, mp, !0);
-
- msg = ERL_MESSAGE_TERM(mp);
-
- if (is_non_value(msg)) {
+ if (!erts_proc_sig_decode_dist(rp, rp_locks, mp, !0)) {
ErtsMessage *bad_mp = mp;
/*
* Bad distribution message; remove
* it from the queue...
*/
- ASSERT(!mp->data.attached);
ASSERT(*mpp == bad_mp);
@@ -4301,6 +4483,8 @@ erts_proc_sig_prep_msgq_for_inspection(Process *c_p,
erts_cleanup_messages(bad_mp);
continue;
}
+
+ msg = ERL_MESSAGE_TERM(mp);
}
ASSERT(is_value(msg));
@@ -4459,12 +4643,21 @@ debug_foreach_sig_fake_oh(Eterm term,
}
+static void
+debug_foreach_sig_external(ErtsMessage *msgp,
+ void (*ext_func)(ErtsDistExternal *, void *),
+ void *arg)
+{
+ ext_func(erts_proc_sig_get_external(msgp), arg);
+}
+
void
erts_proc_sig_debug_foreach_sig(Process *c_p,
void (*msg_func)(ErtsMessage *, void *),
void (*oh_func)(ErlOffHeap *, void *),
- void (*mon_func)(ErtsMonitor *, void *),
- void (*lnk_func)(ErtsLink *, void *),
+ ErtsMonitorFunc mon_func,
+ ErtsLinkFunc lnk_func,
+ void (*ext_func)(ErtsDistExternal *, void *),
void *arg)
{
ErtsMessage *queue[] = {c_p->sig_qs.first, c_p->sig_qs.cont, c_p->sig_inq.first};
@@ -4473,10 +4666,10 @@ erts_proc_sig_debug_foreach_sig(Process *c_p,
for (qix = 0; qix < sizeof(queue)/sizeof(queue[0]); qix++) {
ErtsMessage *sig;
for (sig = queue[qix]; sig; sig = sig->next) {
-
- if (ERTS_SIG_IS_MSG(sig))
+
+ if (ERTS_SIG_IS_MSG(sig)) {
msg_func(sig, arg);
- else {
+ } else {
Eterm tag;
Uint16 type;
int op;
@@ -4495,18 +4688,21 @@ erts_proc_sig_debug_foreach_sig(Process *c_p,
case ERTS_SIG_Q_OP_MONITOR_DOWN:
switch (type) {
case ERTS_SIG_Q_TYPE_GEN_EXIT:
- debug_foreach_sig_heap_frags(&sig->hfrag, oh_func, arg);
+ if (ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
+ debug_foreach_sig_external(sig, ext_func, arg);
+ else
+ debug_foreach_sig_heap_frags(&sig->hfrag, oh_func, arg);
break;
case ERTS_LNK_TYPE_PORT:
case ERTS_LNK_TYPE_PROC:
case ERTS_LNK_TYPE_DIST_PROC:
- lnk_func((ErtsLink *) sig, arg);
+ lnk_func((ErtsLink *) sig, arg, -1);
break;
case ERTS_MON_TYPE_PORT:
case ERTS_MON_TYPE_PROC:
case ERTS_MON_TYPE_DIST_PROC:
case ERTS_MON_TYPE_NODE:
- mon_func((ErtsMonitor *) sig, arg);
+ mon_func((ErtsMonitor *) sig, arg, -1);
break;
default:
ERTS_INTERNAL_ERROR("Unexpected sig type");
@@ -4527,7 +4723,7 @@ erts_proc_sig_debug_foreach_sig(Process *c_p,
/* Fall through... */
case ERTS_SIG_Q_OP_MONITOR:
- mon_func((ErtsMonitor *) sig, arg);
+ mon_func((ErtsMonitor *) sig, arg, -1);
break;
case ERTS_SIG_Q_OP_UNLINK:
@@ -4539,7 +4735,7 @@ erts_proc_sig_debug_foreach_sig(Process *c_p,
/* Fall through... */
case ERTS_SIG_Q_OP_LINK:
- lnk_func((ErtsLink *) sig, arg);
+ lnk_func((ErtsLink *) sig, arg, -1);
break;
case ERTS_SIG_Q_OP_GROUP_LEADER: {
diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h
index 6b065a7add..2b055e73bc 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.h
+++ b/erts/emulator/beam/erl_proc_sig_queue.h
@@ -89,6 +89,7 @@
#endif
struct erl_mesg;
+struct erl_dist_external;
typedef struct {
struct erl_mesg *next;
@@ -212,6 +213,38 @@ erts_proc_sig_send_exit(Process *c_p, Eterm from, Eterm to,
/**
*
+ * @brief Send an exit signal to a process.
+ *
+ * This function is used instead of erts_proc_sig_send_exit()
+ * when the signal arrives via the distribution and
+ * therefore no local sender process is available.
+ *
+ * @param[in] dep Distribution entry of channel
+ * that the signal arrived on.
+ *
+ * @param[in] from Identifier of sender.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] dist_ext The exit reason in external term format
+ *
+ * @param[in] hfrag Heap frag with trace token and dist_ext
+ * iff available, otherwise NULL.
+ *
+ * @param[in] reason Exit reason.
+ *
+ * @param[in] token Seq trace token.
+ *
+ */
+void
+erts_proc_sig_send_dist_exit(DistEntry *dep,
+ Eterm from, Eterm to,
+ ErtsDistExternal *dist_ext,
+ ErlHeapFragment *hfrag,
+ Eterm reason, Eterm token);
+
+/**
+ *
* @brief Send an exit signal due to broken link to a process.
*
*
@@ -282,7 +315,7 @@ erts_proc_sig_send_unlink(Process *c_p, ErtsLink *lnk);
*
* This function is used instead of erts_proc_sig_send_link_exit()
* when the signal arrives via the distribution and
- * no link structure is available.
+ * therefore no link structure is available.
*
* @param[in] dep Distribution entry of channel
* that the signal arrived on.
@@ -291,6 +324,11 @@ erts_proc_sig_send_unlink(Process *c_p, ErtsLink *lnk);
*
* @param[in] to Identifier of receiver.
*
+ * @param[in] dist_ext The exit reason in external term format
+ *
+ * @param[in] hfrag Heap frag with trace token and dist_ext
+ * iff available, otherwise NULL.
+ *
* @param[in] reason Exit reason.
*
* @param[in] token Seq trace token.
@@ -299,6 +337,8 @@ erts_proc_sig_send_unlink(Process *c_p, ErtsLink *lnk);
void
erts_proc_sig_send_dist_link_exit(struct dist_entry_ *dep,
Eterm from, Eterm to,
+ ErtsDistExternal *dist_ext,
+ ErlHeapFragment *hfrag,
Eterm reason, Eterm token);
/**
@@ -307,7 +347,7 @@ erts_proc_sig_send_dist_link_exit(struct dist_entry_ *dep,
*
* This function is used instead of erts_proc_sig_send_unlink()
* when the signal arrives via the distribution and
- * no link structure is available.
+ * therefore no link structure is available.
*
* @param[in] dep Distribution entry of channel
* that the signal arrived on.
@@ -380,7 +420,7 @@ erts_proc_sig_send_monitor(ErtsMonitor *mon, Eterm to);
*
* This function is used instead of erts_proc_sig_send_monitor_down()
* when the signal arrives via the distribution and
- * no link structure is available.
+ * therefore no monitor structure is available.
*
* @param[in] dep Pointer to distribution entry
* of channel that the signal
@@ -392,12 +432,19 @@ erts_proc_sig_send_monitor(ErtsMonitor *mon, Eterm to);
*
* @param[in] to Identifier of receiver.
*
+ * @param[in] dist_ext The exit reason in external term format
+ *
+ * @param[in] hfrag Heap frag with trace token and dist_ext
+ * iff available, otherwise NULL.
+ *
* @param[in] reason Exit reason.
*
*/
void
erts_proc_sig_send_dist_monitor_down(DistEntry *dep, Eterm ref,
Eterm from, Eterm to,
+ ErtsDistExternal *dist_ext,
+ ErlHeapFragment *hfrag,
Eterm reason);
/**
@@ -740,7 +787,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
* queue.
*/
int
-erts_proc_sig_handle_exit(Process *c_p, int *redsp);
+erts_proc_sig_handle_exit(Process *c_p, Sint *redsp);
/**
*
@@ -962,6 +1009,34 @@ void
erts_proc_sig_handle_pending_suspend(Process *c_p);
/**
+ *
+ * @brief Decode the payload of an external signal
+ *
+ * For a distributed signal with a payload, only the control message
+ * is decoded by the dist entry. The final decode of the payload is
+ * done by the receiving process when it inspects the signal, by
+ * calling this function.
+ *
+ * This function handles both messages and link/monitor exits.
+ *
+ * Returns true if the decode was successful, false otherwise.
+ *
+ * @param[in] proc Pointer to executing process
+ *
+ * @param[in] proc_locks Locks held by the process. Should always be MAIN.
+ *
+ * @param[in] msgp The signal to decode
+ *
+ * @param[in] force_off_heap If the term should be forced to be off-heap
+ */
+int
+erts_proc_sig_decode_dist(Process *proc, ErtsProcLocks proc_locks,
+ ErtsMessage *msgp, int force_off_heap);
+
+ErtsDistExternal *
+erts_proc_sig_get_external(ErtsMessage *msgp);
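+
+/* Editor's sketch (illustrative, not part of the patch): the caller
+ * pattern used by handle_msg_tracing() in erl_proc_sig_queue.c:
+ *
+ *     if (ERTS_SIG_IS_EXTERNAL_MSG(sig) &&
+ *         !erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN, sig, 0)) {
+ *         (bad dist message: unlink it and call erts_cleanup_messages())
+ *     }
+ */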
+
+/**
* @brief Initialize this functionality
*/
void erts_proc_sig_queue_init(void);
@@ -970,8 +1045,9 @@ void
erts_proc_sig_debug_foreach_sig(Process *c_p,
void (*msg_func)(ErtsMessage *, void *),
void (*oh_func)(ErlOffHeap *, void *),
- void (*mon_func)(ErtsMonitor *, void *),
- void (*lnk_func)(ErtsLink *, void *),
+ ErtsMonitorFunc mon_func,
+ ErtsLinkFunc lnk_func,
+ void (*ext_func)(ErtsDistExternal *, void *),
void *arg);
extern Process *erts_dirty_process_signal_handler;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 1f464e2e5a..1f6adb98ef 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -735,12 +735,6 @@ erts_pre_init_process(void)
#endif
}
-static void
-release_process(void *vproc)
-{
- erts_proc_dec_refc((Process *) vproc);
-}
-
/* initialize the scheduler */
void
erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
@@ -752,7 +746,7 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
erts_ptab_init_table(&erts_proc,
ERTS_ALC_T_PROC_TABLE,
- release_process,
+ NULL,
(ErtsPTabElementCommon *) &erts_invalid_process.common,
proc_tab_size,
sizeof(Process),
@@ -1476,7 +1470,10 @@ proclist_create(Process *p)
{
ErtsProcList *plp = proclist_alloc();
ensure_later_proc_interval(p->common.u.alive.started_interval);
- plp->pid = p->common.id;
+ if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE)
+ plp->u.p = p;
+ else
+ plp->u.pid = p->common.id;
plp->started_interval = p->common.u.alive.started_interval;
return plp;
}
@@ -1485,7 +1482,7 @@ static ERTS_INLINE ErtsProcList *
proclist_copy(ErtsProcList *plp0)
{
ErtsProcList *plp1 = proclist_alloc();
- plp1->pid = plp0->pid;
+ plp1->u.pid = plp0->u.pid;
plp1->started_interval = plp0->started_interval;
return plp1;
}
@@ -1520,7 +1517,10 @@ erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList *plp)
ErtsProcList *first = plp;
while (plp) {
- erts_print(to, to_arg, "%T", plp->pid);
+ if (is_pid(plp->u.pid))
+ erts_print(to, to_arg, "%T", plp->u.pid);
+ else
+ erts_print(to, to_arg, "%T", plp->u.p->common.id);
plp = plp->next;
if (plp == first)
break;
@@ -2375,9 +2375,12 @@ struct debug_lop {
static void later_thr_debug_wait_completed(void *vlop)
{
struct debug_lop *lop = vlop;
- erts_aint32_t count = (erts_aint32_t) erts_no_schedulers;
- count += 1; /* aux thread */
- if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) {
+
+ if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == 1) {
+ erts_aint32_t count = (erts_aint32_t) erts_no_schedulers;
+ count += 1; /* aux thread */
+ erts_atomic32_set_nob(&debug_wait_completed_count, count);
+
/* scheduler threads */
erts_schedule_multi_misc_aux_work(0,
erts_no_schedulers,
@@ -2395,19 +2398,28 @@ static void later_thr_debug_wait_completed(void *vlop)
static void
init_thr_debug_wait_completed(void *vproc)
{
- struct debug_lop* lop = erts_alloc(ERTS_ALC_T_DEBUG,
- sizeof(struct debug_lop));
- lop->proc = vproc;
- erts_schedule_thr_prgr_later_op(later_thr_debug_wait_completed, lop, &lop->lop);
+ if (debug_wait_completed_flags == ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK) {
+ if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == 1) {
+ erts_atomic32_set_nob(&debug_wait_completed_count, 0);
+ erts_resume((Process *) vproc, (ErtsProcLocks) 0);
+ erts_proc_dec_refc((Process *) vproc);
+ }
+ }
+ else {
+ struct debug_lop* lop = erts_alloc(ERTS_ALC_T_DEBUG,
+ sizeof(struct debug_lop));
+ lop->proc = vproc;
+ erts_schedule_thr_prgr_later_op(later_thr_debug_wait_completed, lop, &lop->lop);
+ }
}
int
erts_debug_wait_completed(Process *c_p, int flags)
{
- /* Only one process at a time can do this */
- erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers);
- count += 1; /* aux thread */
+    /* Only one process at a time can do this; +1 to mark as busy */
+ erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers + 1);
+
if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count,
count,
0)) {
@@ -3377,7 +3389,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ErtsMonotonicTime current_time = 0;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+
+ if (aux_work && ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ ERTS_INTERNAL_ERROR("Executing aux work on a dirty scheduler.");
+ }
+
+ if (aux_work) {
if (!thr_prgr_active) {
erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
@@ -3389,16 +3406,14 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- flgs = erts_atomic32_read_acqb(&ssi->flags);
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- erts_bump_timers(esdp->timer_wheel, current_time);
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
}
+ erts_bump_timers(esdp->timer_wheel, current_time);
}
}
else {
@@ -4141,9 +4156,7 @@ schedule_bound_processes(ErtsRunQueue *rq,
static ERTS_INLINE void
clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
{
-#ifdef DEBUG
erts_aint32_t old;
-#endif
erts_aint32_t qb = prio_bit;
if (rq == ERTS_DIRTY_CPU_RUNQ)
qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
@@ -4151,13 +4164,8 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
}
-#ifdef DEBUG
- old = (int)
-#else
- (void)
-#endif
- erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
- ASSERT(old & qb);
+ old = (int) erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
+ ASSERT(old & qb); (void)old;
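+    /* Editor's note: reading the flags unconditionally and casting the
+     * result to void keeps debug and release builds on the same code
+     * path; the (void) cast merely silences the unused-variable warning
+     * when ASSERT compiles away. */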
}
@@ -6530,8 +6538,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
n &= ~running_flgs;
if ((!!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- | ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE))
- & !(a & ERTS_PSFLG_FREE)) {
+ | ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE))) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
@@ -6566,7 +6573,6 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
else {
Process* sched_p;
- ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6696,8 +6702,8 @@ change_proc_schedule_state(Process *p,
enqueue = ERTS_ENQUEUE_NOT;
- if (a & ERTS_PSFLG_FREE)
- break; /* We don't want to schedule free processes... */
+ if ((a & (ERTS_PSFLG_FREE|ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_FREE)
+ break; /* If free and not active, do not schedule */
if (clear_state_flags)
n &= ~clear_state_flags;
@@ -7223,8 +7229,7 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(sched_type,
- plp->pid);
+ schdlr_sspnd_resume_proc(sched_type, plp->u.pid);
proclist_destroy(plp);
}
}
@@ -7300,9 +7305,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
Uint32 nrml_prio, dcpu_prio, dio_prio;
ErtsSchedType exec_type;
ErtsRunQueue *exec_rq;
-#ifdef DEBUG
erts_aint32_t dbg_val;
-#endif
ASSERT(schdlr_sspnd.msb.ongoing);
@@ -7417,16 +7420,12 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
* Suspend this scheduler and wake up scheduler
* number one of another type...
*/
-#ifdef DEBUG
dbg_val =
-#else
- (void)
-#endif
erts_atomic32_read_bset_mb(&esdp->ssi->flags,
(ERTS_SSI_FLG_SUSPENDED
| ERTS_SSI_FLG_MSB_EXEC),
ERTS_SSI_FLG_SUSPENDED);
- ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC);
+ ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC); (void)dbg_val;
switch (exec_type) {
case ERTS_SCHED_NORMAL:
@@ -7444,11 +7443,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
break;
}
-#ifdef DEBUG
dbg_val =
-#else
- (void)
-#endif
erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags,
(ERTS_SSI_FLG_SUSPENDED
| ERTS_SSI_FLG_MSB_EXEC),
@@ -7666,7 +7661,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
else {
schdlr_sspnd.changer = am_true; /* change right in transit */
/* resume process that is queued for next change... */
- resume.onln.nxt = plp->pid;
+ resume.onln.nxt = plp->u.pid;
ASSERT(is_internal_pid(resume.onln.nxt));
}
}
@@ -7886,7 +7881,7 @@ abort_sched_onln_chng_waitq(Process *p)
proclist_destroy(plp);
plp = erts_proclist_peek_first(schdlr_sspnd.chngq);
if (plp)
- resume = plp->pid;
+ resume = plp->u.pid;
else
schdlr_sspnd.changer = am_false;
}
@@ -8152,7 +8147,8 @@ done:
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal, int all)
{
- int resume_proc, ix, res, have_unlocked_plocks = 0;
+ ErtsSchedSuspendResult res;
+ int resume_proc, ix, have_unlocked_plocks = 0;
ErtsProcList *plp;
ErtsMultiSchedulingBlock *msbp;
erts_aint32_t chng_flg;
@@ -8385,10 +8381,10 @@ erts_multi_scheduling_blockers(Process *p, int normal)
plp1;
plp1 = erts_proclist_peek_next(msbp->blckrs, plp1)) {
for (plp2 = erts_proclist_peek_first(msbp->blckrs);
- plp2->pid != plp1->pid;
+ plp2->u.pid != plp1->u.pid;
plp2 = erts_proclist_peek_next(msbp->blckrs, plp2));
if (plp2 == plp1) {
- res = CONS(hp, plp1->pid, res);
+ res = CONS(hp, plp1->u.pid, res);
hp += 2;
}
/* else: already in result list */
@@ -9025,11 +9021,8 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
suspend = 1;
if (suspend) {
-#ifdef DEBUG
- int res =
-#endif
- suspend_process(c_p, c_p);
- ASSERT(res);
+ int res = suspend_process(c_p, c_p);
+ ASSERT(res); (void)res;
}
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
@@ -9060,8 +9053,13 @@ erts_resume_processes(ErtsProcList *list)
while (plp) {
Process *proc;
ErtsProcList *fplp;
- ASSERT(is_internal_pid(plp->pid));
- proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
+ ASSERT(is_internal_pid(plp->u.pid) || is_CP((Eterm)plp->u.p));
+ if (is_internal_pid(plp->u.pid))
+ proc = erts_pid2proc(NULL, 0, plp->u.pid, ERTS_PROC_LOCK_STATUS);
+ else {
+ proc = plp->u.p;
+ erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS);
+ }
if (proc) {
if (erts_proclist_same(plp, proc)) {
resume_process(proc, ERTS_PROC_LOCK_STATUS);
@@ -9654,7 +9652,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
while (1) {
erts_aint32_t exp, new;
- int run_process;
+ int run_process, not_running, exiting_on_normal_sched,
+ not_suspended, not_exiting_on_dirty_sched;
new = exp = state;
new &= psflg_band_mask;
/*
@@ -9663,29 +9662,33 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
* scheduler, and not suspended (and not in a
* state where suspend should be ignored).
*/
- run_process = (((!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS
- | ERTS_PSFLG_FREE)))
- | (((state & (ERTS_PSFLG_RUNNING
-
- | ERTS_PSFLG_FREE
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING_SYS
- | ERTS_PSFLG_EXITING))
- == ERTS_PSFLG_EXITING)
- & (!!is_normal_sched))
- )
- & ((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_EXITING
- | ERTS_PSFLG_FREE
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- != ERTS_PSFLG_SUSPENDED)
- & (!(state & ERTS_PSFLG_EXITING)
- | (!!is_normal_sched))
- );
+ not_running = !(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_FREE));
+ exiting_on_normal_sched =
+ ((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))
+ == (ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE))
+ & (!!is_normal_sched);
+
+
+ not_suspended = ((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ != ERTS_PSFLG_SUSPENDED);
+
+ not_exiting_on_dirty_sched = !(state & ERTS_PSFLG_EXITING) | (!!is_normal_sched);
+
+ run_process = (not_running | exiting_on_normal_sched)
+ & not_suspended
+ & not_exiting_on_dirty_sched;
if (run_process) {
if (state & (ERTS_PSFLG_ACTIVE_SYS
@@ -9976,7 +9979,7 @@ trace_schedule_in(Process *p, erts_aint32_t state)
/* Clear tracer if it has been removed */
if (erts_is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common)) {
- if (state & ERTS_PSFLG_EXITING) {
+ if (state & ERTS_PSFLG_EXITING && p->u.terminate) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
}
@@ -10000,12 +10003,9 @@ trace_schedule_out(Process *p, erts_aint32_t state)
if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
+ if (state & ERTS_PSFLG_EXITING && p->u.terminate) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ERTS_PROC_LOCK_MAIN,
- ((state & ERTS_PSFLG_FREE)
- ? am_out_exited
- : am_out_exiting));
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out_exiting);
}
else {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
@@ -12080,12 +12080,104 @@ erts_set_self_exiting(Process *c_p, Eterm reason)
add2runq(enqueue, enq_prio, c_p, state, NULL);
}
-void
-erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
+static int
+erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
+{
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
+ Process *c_p = ctxt->c_p;
+ Eterm reason = ctxt->reason;
+ int code;
+ ErtsDSigSendContext ctx;
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ Eterm watcher, ref, *hp;
+ ErtsMonitorData *mdp = NULL;
+ Eterm watched;
+ Uint watcher_sz, ref_sz;
+ ErtsHeapFactory factory;
+ Sint reds_consumed = 0;
+
+ ASSERT(erts_monitor_is_target(mon) && mon->type == ERTS_MON_TYPE_DIST_PROC);
+
+ mdp = erts_monitor_to_data(mon);
+
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else
+ watched = c_p->common.id;
+ ASSERT(is_internal_pid(watched) || is_atom(watched));
+
+ watcher = mon->other.item;
+ ASSERT(is_external_pid(watcher));
+ dep = external_pid_dist_entry(watcher);
+ ASSERT(dep);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
+
+ code = erts_dsig_prepare(&ctx, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 0, 1);
+
+ ctx.reds = (Sint) (reds * TERM_TO_BINARY_LOOP_FACTOR);
+
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ break;
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED:
+ if (dist->connection_id != ctx.connection_id)
+ break;
+ erts_factory_proc_init(&factory, c_p);
+ watcher_sz = size_object(watcher);
+ hp = erts_produce_heap(&factory, watcher_sz, 0);
+ watcher = copy_struct(watcher, watcher_sz, &hp, factory.off_heap);
+ ref_sz = size_object(mdp->ref);
+ hp = erts_produce_heap(&factory, ref_sz, 0);
+ ref = copy_struct(mdp->ref, ref_sz, &hp, factory.off_heap);
+ erts_factory_close(&factory);
+ code = erts_dsig_send_m_exit(&ctx,
+ watcher,
+ watched,
+ ref,
+ reason);
+ reds_consumed = reds - (ctx.reds / TERM_TO_BINARY_LOOP_FACTOR);
+ switch (code) {
+ case ERTS_DSIG_SEND_CONTINUE:
+ case ERTS_DSIG_SEND_YIELD:
+ erts_set_gc_state(c_p, 0);
+ ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
+ reds_consumed = reds; /* force yield */
+ break;
+ case ERTS_DSIG_SEND_OK:
+ break;
+ case ERTS_DSIG_SEND_TOO_LRG:
+ erts_kill_dist_connection(dep, dist->connection_id);
+ erts_set_gc_state(c_p, 1);
+ break;
+ default:
+ ASSERT(! "Invalid dsig send exit monitor result");
+ break;
+ }
+ break;
+ default:
+ ASSERT(! "Invalid dsig prep exit monitor result");
+ break;
+ }
+ if (!erts_monitor_dist_delete(&mdp->origin))
+ erts_monitor_release(mon);
+ else
+ erts_monitor_release_both(mdp);
+ return reds_consumed;
+}
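+
+/* Editor's note (illustrative, not part of the original patch): the
+ * reduction bookkeeping above converts between scheduler reductions
+ * and distribution-encoder work units:
+ *
+ *     ctx.reds      = reds * TERM_TO_BINARY_LOOP_FACTOR;             (budget in)
+ *     reds_consumed = reds - ctx.reds / TERM_TO_BINARY_LOOP_FACTOR;  (budget spent)
+ *
+ * A send that exhausts the budget, or that traps through
+ * erts_dsend_export_trap_context(), reports the full reds as consumed
+ * so the exiting process yields. */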
+
+int
+erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
{
- Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
- Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
+ Process *c_p = ctxt->c_p;
+ Eterm reason = ctxt->reason;
ErtsMonitorData *mdp = NULL;
+ int res = 1;
if (erts_monitor_is_target(mon)) {
/* We are being watched... */
@@ -12115,43 +12207,48 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
case ERTS_MON_TYPE_DIST_PROC: {
ErtsMonLnkDist *dist;
DistEntry *dep;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
Eterm watcher;
Eterm watched;
- mdp = erts_monitor_to_data(mon);
+ if (is_immed(reason)) {
+ mdp = erts_monitor_to_data(mon);
- if (mon->flags & ERTS_ML_FLG_NAME)
- watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
- else
- watched = c_p->common.id;
- ASSERT(is_internal_pid(watched) || is_atom(watched));
-
- watcher = mon->other.item;
- ASSERT(is_external_pid(watcher));
- dep = external_pid_dist_entry(watcher);
- ASSERT(dep);
- dist = ((ErtsMonitorDataExtended *) mdp)->dist;
- ASSERT(dist);
- code = erts_dsig_prepare(&dsd, dep, NULL, 0,
- ERTS_DSP_NO_LOCK, 0, 0);
- switch (code) {
- case ERTS_DSIG_PREP_CONNECTED:
- case ERTS_DSIG_PREP_PENDING:
- if (dist->connection_id == dsd.connection_id) {
- code = erts_dsig_send_m_exit(&dsd,
- watcher,
- watched,
- mdp->ref,
- reason);
- ASSERT(code == ERTS_DSIG_SEND_OK);
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else
+ watched = c_p->common.id;
+ ASSERT(is_internal_pid(watched) || is_atom(watched));
+
+ watcher = mon->other.item;
+ ASSERT(is_external_pid(watcher));
+ dep = external_pid_dist_entry(watcher);
+ ASSERT(dep);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
+ code = erts_dsig_prepare(&ctx, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 1, 1, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == ctx.connection_id) {
+ code = erts_dsig_send_m_exit(&ctx,
+ watcher,
+ watched,
+ mdp->ref,
+ reason);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ default:
+ break;
}
- default:
- break;
+ if (!erts_monitor_dist_delete(&mdp->origin))
+ mdp = NULL;
+ } else {
+ erts_monitor_tree_insert(&ctxt->dist_monitors, mon);
+ return 1;
}
- if (!erts_monitor_dist_delete(&mdp->origin))
- mdp = NULL;
break;
}
default:
@@ -12193,7 +12290,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
case ERTS_MON_TYPE_DIST_PROC: {
ErtsMonLnkDist *dist;
DistEntry *dep;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
Eterm watched;
@@ -12210,17 +12307,16 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
ASSERT(is_external_pid(watched));
dep = external_pid_dist_entry(watched);
}
- code = erts_dsig_prepare(&dsd, dep, NULL, 0,
- ERTS_DSP_NO_LOCK, 0, 0);
+ code = erts_dsig_prepare(&ctx, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 1, 1, 0);
switch (code) {
case ERTS_DSIG_PREP_CONNECTED:
case ERTS_DSIG_PREP_PENDING:
- if (dist->connection_id == dsd.connection_id) {
- code = erts_dsig_send_demonitor(&dsd,
+ if (dist->connection_id == ctx.connection_id) {
+ code = erts_dsig_send_demonitor(&ctx,
c_p->common.id,
watched,
- mdp->ref,
- 1);
+ mdp->ref);
ASSERT(code == ERTS_DSIG_SEND_OK);
}
default:
@@ -12228,6 +12324,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
}
if (!erts_monitor_dist_delete(&mdp->target))
mdp = NULL;
+ res = 100;
break;
}
default:
@@ -12240,11 +12337,94 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
erts_monitor_release_both(mdp);
else if (mon)
erts_monitor_release(mon);
+ return res;
}
-void
-erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
+static int
+erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
+{
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
+ Process *c_p = ctxt->c_p;
+ Eterm reason = ctxt->reason, item, *hp;
+ Uint item_sz;
+ int code;
+ ErtsDSigSendContext ctx;
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ ErtsLink *dlnk;
+ ErtsLinkData *ldp = NULL;
+ ErtsHeapFactory factory;
+ Sint reds_consumed = 0;
+
+ ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
+ dlnk = erts_link_to_other(lnk, &ldp);
+ dist = ((ErtsLinkDataExtended *) ldp)->dist;
+
+ ASSERT(is_external_pid(lnk->other.item));
+ dep = external_pid_dist_entry(lnk->other.item);
+
+ ASSERT(dep != erts_this_dist_entry);
+
+ if (!erts_link_dist_delete(dlnk))
+ ldp = NULL;
+
+ code = erts_dsig_prepare(&ctx, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 0, 0);
+
+ ctx.reds = (Sint) (reds * TERM_TO_BINARY_LOOP_FACTOR);
+
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ break;
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED:
+ if (dist->connection_id != ctx.connection_id)
+ break;
+ erts_factory_proc_init(&factory, c_p);
+ item_sz = size_object(lnk->other.item);
+ hp = erts_produce_heap(&factory, item_sz, 0);
+ item = copy_struct(lnk->other.item, item_sz, &hp, factory.off_heap);
+ erts_factory_close(&factory);
+ code = erts_dsig_send_exit_tt(&ctx,
+ c_p->common.id,
+ item,
+ reason,
+ SEQ_TRACE_TOKEN(c_p));
+ reds_consumed = reds - (ctx.reds / TERM_TO_BINARY_LOOP_FACTOR);
+ switch (code) {
+ case ERTS_DSIG_SEND_YIELD:
+ case ERTS_DSIG_SEND_CONTINUE:
+ erts_set_gc_state(c_p, 0);
+ ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
+ reds_consumed = reds; /* force yield */
+ break;
+ case ERTS_DSIG_SEND_OK:
+ break;
+ case ERTS_DSIG_SEND_TOO_LRG:
+ erts_kill_dist_connection(dep, dist->connection_id);
+ erts_set_gc_state(c_p, 1);
+ break;
+ default:
+ ASSERT(! "Invalid dsig send exit monitor result");
+ break;
+ }
+ break;
+ default:
+ ASSERT(! "Invalid dsig prep exit monitor result");
+ break;
+ }
+ if (ldp)
+ erts_link_release_both(ldp);
+ else if (lnk)
+ erts_link_release(lnk);
+ return reds_consumed;
+}
+
+int
+erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt, Sint reds)
{
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
ErtsLinkData *ldp = NULL;
@@ -12275,32 +12455,40 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
DistEntry *dep;
ErtsMonLnkDist *dist;
ErtsLink *dlnk;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
- dlnk = erts_link_to_other(lnk, &ldp);
- dist = ((ErtsLinkDataExtended *) ldp)->dist;
+ if (is_immed(reason)) {
+ dlnk = erts_link_to_other(lnk, &ldp);
+ dist = ((ErtsLinkDataExtended *) ldp)->dist;
- ASSERT(is_external_pid(lnk->other.item));
- dep = external_pid_dist_entry(lnk->other.item);
+ ASSERT(is_external_pid(lnk->other.item));
+ dep = external_pid_dist_entry(lnk->other.item);
- ASSERT(dep != erts_this_dist_entry);
+ ASSERT(dep != erts_this_dist_entry);
- if (!erts_link_dist_delete(dlnk))
- ldp = NULL;
+ if (!erts_link_dist_delete(dlnk))
+ ldp = NULL;
- code = erts_dsig_prepare(&dsd, dep, c_p, 0, ERTS_DSP_NO_LOCK, 0, 0);
- switch (code) {
- case ERTS_DSIG_PREP_CONNECTED:
- case ERTS_DSIG_PREP_PENDING:
- if (dist->connection_id == dsd.connection_id) {
- code = erts_dsig_send_exit_tt(&dsd,
- c_p->common.id,
- lnk->other.item,
- reason,
- SEQ_TRACE_TOKEN(c_p));
- ASSERT(code == ERTS_DSIG_SEND_OK);
+ code = erts_dsig_prepare(&ctx, dep, c_p, 0, ERTS_DSP_NO_LOCK, 1, 1, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == ctx.connection_id) {
+ code = erts_dsig_send_exit_tt(&ctx,
+ c_p->common.id,
+ lnk->other.item,
+ reason,
+ SEQ_TRACE_TOKEN(c_p));
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ break;
+ default:
+ break;
}
+ } else {
+ erts_link_tree_insert(&ctxt->dist_links, lnk);
+ return 1;
}
break;
}
@@ -12313,6 +12501,7 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
erts_link_release_both(ldp);
else if (lnk)
erts_link_release(lnk);
+ return 1;
}
 /* this function finishes a process and propagates exit messages - called
@@ -12346,11 +12535,8 @@ erts_do_exit_process(Process* p, Eterm reason)
set_self_exiting(p, reason, NULL, NULL, NULL);
- if (IS_TRACED(p)) {
- if (IS_TRACED_FL(p, F_TRACE_CALLS))
- erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
-
- }
+ if (IS_TRACED_FL(p, F_TRACE_CALLS))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
erts_trace_check_exiting(p->common.id);
@@ -12365,288 +12551,452 @@ erts_do_exit_process(Process* p, Eterm reason)
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ if (IS_TRACED_FL(p, F_TRACE_PROCS))
trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason);
-
/*
* p->u.initial of this process can *not* be used anymore;
* will be overwritten by misc termination data.
*/
p->u.terminate = NULL;
+ BUMP_REDS(p, 100);
+
erts_continue_exit_process(p);
}
-void
-erts_continue_exit_process(Process *p)
-{
+enum continue_exit_phase {
+ ERTS_CONTINUE_EXIT_TIMERS,
+ ERTS_CONTINUE_EXIT_BLCKD_MSHED,
+ ERTS_CONTINUE_EXIT_BLCKD_NMSHED,
+ ERTS_CONTINUE_EXIT_USING_DB,
+ ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS,
+ ERTS_CONTINUE_EXIT_FREE,
+ ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS_AFTER,
+ ERTS_CONTINUE_EXIT_LINKS,
+ ERTS_CONTINUE_EXIT_MONITORS,
+ ERTS_CONTINUE_EXIT_LT_MONITORS,
+ ERTS_CONTINUE_EXIT_HANDLE_PROC_SIG,
+ ERTS_CONTINUE_EXIT_DIST_LINKS,
+ ERTS_CONTINUE_EXIT_DIST_MONITORS,
+ ERTS_CONTINUE_EXIT_DONE,
+};
+
+struct continue_exit_state {
+ enum continue_exit_phase phase;
ErtsLink *links;
ErtsMonitor *monitors;
ErtsMonitor *lt_monitors;
+ Eterm reason;
+ ErtsProcExitContext pectxt;
+ DistEntry *dep;
+ void *yield_state;
+};
+
+void
+erts_continue_exit_process(Process *p)
+{
+ struct continue_exit_state static_state, *trap_state = &static_state;
ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
- Eterm reason = p->fvalue;
- DistEntry *dep = NULL;
erts_aint32_t state;
int delay_del_proc = 0;
- ErtsProcExitContext pectxt;
-
+ Sint reds = ERTS_BIF_REDS_LEFT(p);
#ifdef DEBUG
int yield_allowed = 1;
#endif
+ if (p->u.terminate) {
+ trap_state = p->u.terminate;
+        /* Re-set the reason as it may have been moved by the GC */
+ trap_state->reason = p->fvalue;
+ } else {
+ trap_state->phase = ERTS_CONTINUE_EXIT_TIMERS;
+ trap_state->reason = p->fvalue;
+ trap_state->dep = NULL;
+ trap_state->yield_state = NULL;
+ }
+
ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
ASSERT(ERTS_PROC_IS_EXITING(p));
ASSERT(erts_proc_read_refc(p) > 0);
- if (p->bif_timers) {
- if (erts_cancel_bif_timers(p, &p->bif_timers, &p->u.terminate)) {
- ASSERT(erts_proc_read_refc(p) > 0);
- goto yield;
- }
- ASSERT(erts_proc_read_refc(p) > 0);
- p->bif_timers = NULL;
- }
-
- if (p->flags & F_SCHDLR_ONLN_WAITQ)
- abort_sched_onln_chng_waitq(p);
-
- if (p->flags & F_HAVE_BLCKD_MSCHED) {
- ErtsSchedSuspendResult ssr;
- ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 0, 1);
- switch (ssr) {
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- goto yield;
- case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE:
- case ERTS_SCHDLR_SSPND_YIELD_DONE:
- p->flags &= ~F_HAVE_BLCKD_MSCHED;
- break;
- case ERTS_SCHDLR_SSPND_EINVAL:
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
- __FILE__, __LINE__, (int) ssr);
- }
- }
- if (p->flags & F_HAVE_BLCKD_NMSCHED) {
- ErtsSchedSuspendResult ssr;
- ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 1, 1);
- switch (ssr) {
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- goto yield;
- case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE:
- case ERTS_SCHDLR_SSPND_YIELD_DONE:
- p->flags &= ~F_HAVE_BLCKD_MSCHED;
- break;
- case ERTS_SCHDLR_SSPND_EINVAL:
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
- __FILE__, __LINE__, (int) ssr);
- }
- }
+restart:
+ switch (trap_state->phase) {
+ case ERTS_CONTINUE_EXIT_TIMERS:
+ if (p->bif_timers) {
+ reds = erts_cancel_bif_timers(p, &p->bif_timers, &trap_state->yield_state, reds);
+ if (reds <= 0) goto yield;
+ p->bif_timers = NULL;
+ }
- if (p->flags & F_USING_DB) {
- if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN))
- goto yield;
- p->flags &= ~F_USING_DB;
- }
+ if (p->flags & F_SCHDLR_ONLN_WAITQ) {
+ abort_sched_onln_chng_waitq(p);
+ reds -= 100;
+ }
- erts_set_gc_state(p, 1);
- state = erts_atomic32_read_acqb(&p->state);
- if ((state & ERTS_PSFLG_SYS_TASKS) || p->dirty_sys_tasks) {
- if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
- goto yield;
- }
+ trap_state->phase = ERTS_CONTINUE_EXIT_BLCKD_MSHED;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_BLCKD_MSHED:
+
+ if (p->flags & F_HAVE_BLCKD_MSCHED) {
+ ErtsSchedSuspendResult ssr;
+ ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 0, 1);
+ switch (ssr) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ p->flags &= ~F_HAVE_BLCKD_MSCHED;
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
+ __FILE__, __LINE__, (int) ssr);
+ }
+ reds -= 100;
+ }
-#ifdef DEBUG
- erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
- ASSERT(p->dirty_sys_tasks == NULL);
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-#endif
+ trap_state->phase = ERTS_CONTINUE_EXIT_BLCKD_NMSHED;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_BLCKD_NMSHED:
+
+ if (p->flags & F_HAVE_BLCKD_NMSCHED) {
+ ErtsSchedSuspendResult ssr;
+ ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 1, 1);
+ switch (ssr) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ p->flags &= ~F_HAVE_BLCKD_MSCHED;
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
+ __FILE__, __LINE__, (int) ssr);
+ }
+ reds -= 100;
+ }
- if (p->flags & F_USING_DDLL) {
- erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
- p->flags &= ~F_USING_DDLL;
- }
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_USING_DB;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_USING_DB:
- /*
- * The registered name *should* be the last "erlang resource" to
- * cleanup.
- */
- if (p->common.u.alive.reg) {
- (void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
- ASSERT(!p->common.u.alive.reg);
- }
+ if (p->flags & F_USING_DB) {
+ if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN, &trap_state->yield_state))
+ goto yield;
+ p->flags &= ~F_USING_DB;
+ }
- if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, curr_locks, am_out_exited);
+ trap_state->phase = ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS;
+ case ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS:
- erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- curr_locks = ERTS_PROC_LOCKS_ALL;
+ /* We enable GC again as it can produce more sys-tasks */
+ erts_set_gc_state(p, 1);
+ state = erts_atomic32_read_acqb(&p->state);
+ if ((state & ERTS_PSFLG_SYS_TASKS) || p->dirty_sys_tasks) {
+ reds -= cleanup_sys_tasks(p, state, reds);
+ if (reds <= 0) goto yield;
+ }
+
+ trap_state->phase = ERTS_CONTINUE_EXIT_FREE;
+ case ERTS_CONTINUE_EXIT_FREE:
- /*
- * From this point on we are no longer allowed to yield
- * this process.
- */
#ifdef DEBUG
- yield_allowed = 0;
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
+ ASSERT(p->dirty_sys_tasks == NULL);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#endif
- /*
- * Note! The monitor and link fields will be overwritten
- * by erts_ptab_delete_element() below.
- */
- links = ERTS_P_LINKS(p);
- monitors = ERTS_P_MONITORS(p);
- lt_monitors = ERTS_P_LT_MONITORS(p);
+ if (p->flags & F_USING_DDLL) {
+ erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
+ p->flags &= ~F_USING_DDLL;
+ }
- {
- /* Do *not* use erts_get_runq_proc() */
- ErtsRunQueue *rq;
- rq = erts_get_runq_current(erts_proc_sched_data(p));
+ /*
+ * The registered name *should* be the last "erlang resource" to
+ * cleanup.
+ */
+ if (p->common.u.alive.reg) {
+ (void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
+ ASSERT(!p->common.u.alive.reg);
+ }
- erts_runq_lock(rq);
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ curr_locks = ERTS_PROC_LOCKS_ALL;
- ASSERT(p->scheduler_data);
- ASSERT(p->scheduler_data->current_process == p);
- ASSERT(p->scheduler_data->free_process == NULL);
+ /*
+ * Note! The monitor and link fields will be overwritten
+ * by erts_ptab_delete_element() below.
+ */
+ trap_state->links = ERTS_P_LINKS(p);
+ trap_state->monitors = ERTS_P_MONITORS(p);
+ trap_state->lt_monitors = ERTS_P_LT_MONITORS(p);
- p->scheduler_data->current_process = NULL;
- p->scheduler_data->free_process = p;
+ {
+ /* Do *not* use erts_get_runq_proc() */
+ ErtsRunQueue *rq;
+ rq = erts_get_runq_current(erts_proc_sched_data(p));
- /* Time of death! */
- erts_ptab_delete_element(&erts_proc, &p->common);
+ erts_runq_lock(rq);
- erts_runq_unlock(rq);
- }
+ ASSERT(p->scheduler_data);
+ ASSERT(p->scheduler_data->current_process == p);
+ ASSERT(p->scheduler_data->free_process == NULL);
- /*
- * All "erlang resources" have to be deallocated before this point,
- * e.g. registered name, so monitoring and linked processes can
- * be sure that all interesting resources have been deallocated
- * when the monitors and/or links hit.
- */
+ /* Time of death! */
+ erts_ptab_delete_element(&erts_proc, &p->common);
- {
- /* Inactivate and notify free */
- erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state);
- int refc_inced = 0;
- while (1) {
- n = e = a;
- ASSERT(a & ERTS_PSFLG_EXITING);
- n |= ERTS_PSFLG_FREE;
- n &= ~(ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS);
- if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
- erts_proc_inc_refc(p);
- refc_inced = 1;
- }
- a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
- if (a == e)
- break;
- }
+ erts_runq_unlock(rq);
+ }
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- p->flags |= F_DELAYED_DEL_PROC;
- delay_del_proc = 1;
- /*
- * The dirty scheduler decrease refc
- * when done with the process...
- */
- }
+ /*
+ * All "erlang resources" have to be deallocated before this point,
+ * e.g. registered name, so monitoring and linked processes can
+ * be sure that all interesting resources have been deallocated
+ * when the monitors and/or links hit.
+ */
- if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
- erts_proc_dec_refc(p);
- }
+ {
+ /* Inactivate and notify free */
+ erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state);
+ int refc_inced = 0;
+ while (1) {
+ n = e = a;
+ ASSERT(a & ERTS_PSFLG_EXITING);
+ n |= ERTS_PSFLG_FREE;
+ if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
+ erts_proc_inc_refc(p);
+ refc_inced = 1;
+ }
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ }
- dep = ((p->flags & F_DISTRIBUTION)
- ? ERTS_PROC_SET_DIST_ENTRY(p, NULL)
- : NULL);
+ if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
+ erts_proc_dec_refc(p);
+ }
+ trap_state->dep = ((p->flags & F_DISTRIBUTION)
+ ? ERTS_PROC_SET_DIST_ENTRY(p, NULL)
+ : NULL);
+
+ reds -= 50;
- /*
- * It might show up signal prio elevation tasks until we
- * have entered free state. Cleanup such tasks now.
- */
- state = erts_atomic32_read_acqb(&p->state);
- if (!(state & ERTS_PSFLG_SYS_TASKS))
- erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- else {
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ curr_locks = ERTS_PROC_LOCK_MAIN;
+ trap_state->phase = ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS_AFTER;
+ case ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS_AFTER:
+ /*
+         * Signal priority elevation tasks may show up until we
+         * have entered the free state. Clean up such tasks now.
+ */
- do {
- (void) cleanup_sys_tasks(p, state, CONTEXT_REDS);
- state = erts_atomic32_read_acqb(&p->state);
- } while (state & ERTS_PSFLG_SYS_TASKS);
-
- erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- }
+ state = erts_atomic32_read_acqb(&p->state);
+ if ((state & ERTS_PSFLG_SYS_TASKS) || p->dirty_sys_tasks) {
+ reds -= cleanup_sys_tasks(p, state, reds);
+ if (reds <= 0) goto yield;
+ }
+
+        /* Needs to be unlocked for erts_do_net_exits to work? */
+        /* erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); */
#ifdef DEBUG
- erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- ASSERT(p->sys_task_qs == NULL);
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(p->sys_task_qs == NULL);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#endif
- if (dep) {
- erts_do_net_exits(dep, (reason == am_kill) ? am_killed : reason);
- erts_deref_dist_entry(dep);
- }
+ if (trap_state->dep) {
+ erts_do_net_exits(trap_state->dep,
+ (trap_state->reason == am_kill) ? am_killed : trap_state->reason);
+ erts_deref_dist_entry(trap_state->dep);
+ }
- pectxt.c_p = p;
- pectxt.reason = reason;
+ /* Disable GC so that reason does not get moved */
+ erts_set_gc_state(p, 0);
+
+ trap_state->pectxt.c_p = p;
+ trap_state->pectxt.reason = trap_state->reason;
+ trap_state->pectxt.dist_links = NULL;
+ trap_state->pectxt.dist_monitors = NULL;
+ trap_state->pectxt.dist_state = NIL;
+
+ erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
+
+ erts_proc_sig_fetch(p);
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_LINKS;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_LINKS:
+
+ reds = erts_link_tree_foreach_delete_yielding(
+ &trap_state->links,
+ erts_proc_exit_handle_link,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0)
+ goto yield;
+
+ ASSERT(!trap_state->links);
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_MONITORS;
+ case ERTS_CONTINUE_EXIT_MONITORS:
+
+ reds = erts_monitor_tree_foreach_delete_yielding(
+ &trap_state->monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0)
+ goto yield;
+
+ ASSERT(!trap_state->monitors);
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_LT_MONITORS;
+ case ERTS_CONTINUE_EXIT_LT_MONITORS:
+
+ reds = erts_monitor_list_foreach_delete_yielding(
+ &trap_state->lt_monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0)
+ goto yield;
+
+ ASSERT(!trap_state->lt_monitors);
+ trap_state->phase = ERTS_CONTINUE_EXIT_HANDLE_PROC_SIG;
+ case ERTS_CONTINUE_EXIT_HANDLE_PROC_SIG: {
+ Sint r = reds;
+
+ if (!erts_proc_sig_handle_exit(p, &r))
+ goto yield;
+
+ reds -= r;
+
+ trap_state->phase = ERTS_CONTINUE_EXIT_DIST_LINKS;
+ }
+ case ERTS_CONTINUE_EXIT_DIST_LINKS: {
+
+ continue_dist_send:
+ if (is_not_nil(trap_state->pectxt.dist_state)) {
+ Binary* bin = erts_magic_ref2bin(trap_state->pectxt.dist_state);
+ ErtsDSigSendContext* ctx = (ErtsDSigSendContext*) ERTS_MAGIC_BIN_DATA(bin);
+ Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
+ int result;
+
+ ctx->reds = initial_reds;
+ result = erts_dsig_send(ctx);
+
+ /* erts_dsig_send bumps reductions on the process in the ctx */
+ reds = ERTS_BIF_REDS_LEFT(p);
+
+ switch (result) {
+ case ERTS_DSIG_SEND_OK:
+ break;
+ case ERTS_DSIG_SEND_TOO_LRG: /*SEND_SYSTEM_LIMIT*/
+ erts_kill_dist_connection(ctx->dep, ctx->connection_id);
+ break;
+ case ERTS_DSIG_SEND_YIELD: /*SEND_YIELD_RETURN*/
+ case ERTS_DSIG_SEND_CONTINUE: { /*SEND_YIELD_CONTINUE*/
+ goto yield;
+ }
+ }
- erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
+ trap_state->pectxt.dist_state = NIL;
+ if (reds <= 0)
+ goto yield;
+ goto restart;
+ }
- erts_proc_sig_fetch(p);
+ reds = erts_link_tree_foreach_delete_yielding(
+ &trap_state->pectxt.dist_links,
+ erts_proc_exit_handle_dist_link,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0 || is_not_nil(trap_state->pectxt.dist_state))
+ goto yield;
+ trap_state->phase = ERTS_CONTINUE_EXIT_DIST_MONITORS;
+ }
+ case ERTS_CONTINUE_EXIT_DIST_MONITORS: {
+
+ if (is_not_nil(trap_state->pectxt.dist_state))
+ goto continue_dist_send;
+
+ reds = erts_monitor_tree_foreach_delete_yielding(
+ &trap_state->pectxt.dist_monitors,
+ erts_proc_exit_handle_dist_monitor,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0 || is_not_nil(trap_state->pectxt.dist_state))
+ goto yield;
+
+ trap_state->phase = ERTS_CONTINUE_EXIT_DONE;
+ }
+ case ERTS_CONTINUE_EXIT_DONE: {
+ erts_aint_t state;
+ /*
+ * From this point on we are no longer allowed to yield
+ * this process.
+ */
+#ifdef DEBUG
+ yield_allowed = 0;
+#endif
- erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+ erts_set_gc_state(p, 1);
- if (links) {
- erts_link_tree_foreach_delete(&links,
- erts_proc_exit_handle_link,
- (void *) &pectxt);
- ASSERT(!links);
- }
+        /* Clear the active flags, as we don't want this process
+           to be scheduled in again after this. */
+ state = erts_atomic32_read_band_relb(&p->state,
+ ~(ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS));
- if (monitors) {
- erts_monitor_tree_foreach_delete(&monitors,
- erts_proc_exit_handle_monitor,
- (void *) &pectxt);
- ASSERT(!monitors);
- }
+ ASSERT(p->scheduler_data);
+ ASSERT(p->scheduler_data->current_process == p);
+ ASSERT(p->scheduler_data->free_process == NULL);
+
+ p->scheduler_data->current_process = NULL;
+ p->scheduler_data->free_process = p;
+
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ p->flags |= F_DELAYED_DEL_PROC;
+ delay_del_proc = 1;
+ /*
+             * The dirty scheduler decreases the refc
+ * when done with the process...
+ */
+ }
- if (lt_monitors) {
- erts_monitor_list_foreach_delete(&lt_monitors,
- erts_proc_exit_handle_monitor,
- (void *) &pectxt);
- ASSERT(!lt_monitors);
+ erts_schedule_thr_prgr_later_cleanup_op(
+ (void (*)(void*))erts_proc_dec_refc,
+ (void *) &p->common,
+ &p->common.u.release,
+ sizeof(Process));
+
+ break;
+ }
}
- /*
- * erts_proc_sig_handle_exit() implements yielding.
- * However, this function cannot handle it yet... loop
- * until done...
- */
- while (!0) {
- int reds = CONTEXT_REDS;
- if (erts_proc_sig_handle_exit(p, &reds))
- break;
+ if (trap_state != &static_state) {
+ erts_free(ERTS_ALC_T_CONT_EXIT_TRAP, trap_state);
+ p->u.terminate = NULL;
}
ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, curr_locks, am_out_exited);
+
erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN);
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
@@ -12658,15 +13008,30 @@ erts_continue_exit_process(Process *p)
yield:
-#ifdef DEBUG
ASSERT(yield_allowed);
-#endif
ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
+ ASSERT(erts_proc_read_refc(p) > 0);
+
+ if (trap_state == &static_state) {
+ trap_state = erts_alloc(ERTS_ALC_T_CONT_EXIT_TRAP, sizeof(*trap_state));
+ sys_memcpy(trap_state, &static_state, sizeof(*trap_state));
+ p->u.terminate = trap_state;
+ }
+
+ ASSERT(p->scheduler_data);
+ ASSERT(p->scheduler_data->current_process == p);
+ ASSERT(p->scheduler_data->free_process == NULL);
+
+ if (trap_state->phase >= ERTS_CONTINUE_EXIT_FREE) {
+ p->scheduler_data->current_process = NULL;
+ p->scheduler_data->free_process = p;
+ }
p->i = (BeamInstr *) beam_continue_exit;
+    /* Why is this lock taken? */
if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) {
erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
curr_locks |= ERTS_PROC_LOCK_STATUS;
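The rewritten erts_continue_exit_process() above turns process exit into a resumable phase machine: each switch case falls through to the next phase while reductions remain, and `goto yield` moves the stack-allocated trap state to the heap (stored in p->u.terminate) so that the next scheduling resumes at the saved phase. A minimal sketch of the control-flow pattern, using hypothetical names (MyTrapState, do_phase_a/do_phase_b) rather than the real ERTS types:

    #include <stdlib.h>

    /* Hypothetical phase workers: each consumes up to 'reds'
     * reductions, returns what it spent, and records a resume
     * point in *ys if interrupted. */
    extern long do_phase_a(void **ys, long reds);
    extern long do_phase_b(void **ys, long reds);

    enum phase { PHASE_A, PHASE_B, PHASE_DONE };

    typedef struct {
        enum phase phase;
        void *yield_state;            /* per-phase resume point */
    } MyTrapState;

    /* Returns 1 when all work is done, 0 when it yielded. */
    static int
    continue_work(MyTrapState **statep, long reds)
    {
        MyTrapState stack_state = { PHASE_A, NULL };
        MyTrapState *st = *statep ? *statep : &stack_state;

        switch (st->phase) {
        case PHASE_A:
            reds -= do_phase_a(&st->yield_state, reds);
            if (reds <= 0) goto yield;
            st->yield_state = NULL;
            st->phase = PHASE_B;
            /* fall through */
        case PHASE_B:
            reds -= do_phase_b(&st->yield_state, reds);
            if (reds <= 0) goto yield;
            st->phase = PHASE_DONE;
            /* fall through */
        case PHASE_DONE:
            break;
        }
        if (st != &stack_state)
            free(st);        /* ERTS frees via ERTS_ALC_T_CONT_EXIT_TRAP */
        *statep = NULL;
        return 1;

    yield:
        if (st == &stack_state) {
            /* First yield: move the state off the C stack. */
            st = malloc(sizeof(*st));
            *st = stack_state;
        }
        *statep = st;
        return 0;
    }

The distributed-send phases add one more wrinkle: when erts_dsig_send() is interrupted, its ErtsDSigSendContext survives in a magic binary referenced from pectxt.dist_state, and the continue_dist_send path above resumes it with a fresh budget scaled by TERM_TO_BINARY_LOOP_FACTOR.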
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 43937f216c..6118c671ee 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -381,7 +381,10 @@ struct ErtsSchedulerSleepInfo_ {
typedef struct ErtsProcList_ ErtsProcList;
struct ErtsProcList_ {
- Eterm pid;
+ union {
+ Eterm pid;
+ Process *p;
+ } u;
Uint64 started_interval;
ErtsProcList* next;
ErtsProcList* prev;
@@ -638,6 +641,7 @@ struct ErtsSchedulerData_ {
ErtsSchedType type;
Uint no; /* Scheduler number for normal schedulers */
Uint dirty_no; /* Scheduler number for dirty schedulers */
+ int flxctr_slot_no; /* slot nr when a flxctr is used */
struct enif_environment_t *current_nif;
Process *dirty_shadow_process;
Port *current_port;
@@ -1319,9 +1323,6 @@ ERTS_GLB_INLINE void erts_heap_frag_shrink(Process* p, Eterm* hp)
#endif /* inline */
Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra);
-#ifdef CHECK_FOR_HOLES
-Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
-#endif
extern erts_rwmtx_t erts_cpu_bind_rwmtx;
/* If any of the erts_system_monitor_* variables are set (enabled),
@@ -1580,7 +1581,7 @@ ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *, ErtsProcList *);
ERTS_GLB_INLINE int
erts_proclist_same(ErtsProcList *plp, Process *p)
{
- return (plp->pid == p->common.id
+ return ((plp->u.pid == p->common.id || plp->u.p == p)
&& (plp->started_interval
== p->common.u.alive.started_interval));
}
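With the union above, an ErtsProcList element can identify its process either by pid term or by a direct Process pointer, and erts_proclist_same() matches on either field while the started_interval comparison guards against identifier reuse. A short sketch of the two representations (the helper names are hypothetical):

    /* Hypothetical helpers showing both ways to fill in an element. */
    static void
    proclist_init_by_pid(ErtsProcList *plp, Process *p)
    {
        plp->u.pid = p->common.id;       /* store the pid term */
        plp->started_interval = p->common.u.alive.started_interval;
    }

    static void
    proclist_init_by_ptr(ErtsProcList *plp, Process *p)
    {
        plp->u.p = p;                    /* store the pointer directly */
        plp->started_interval = p->common.u.alive.started_interval;
    }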
@@ -1819,9 +1820,12 @@ Eterm erts_process_info(Process *c_p, ErtsHeapFactory *hfact,
typedef struct {
Process *c_p;
Eterm reason;
+ ErtsLink *dist_links;
+ ErtsMonitor *dist_monitors;
+ Eterm dist_state;
} ErtsProcExitContext;
-void erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt);
-void erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt);
+int erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt, Sint reds);
+int erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt, Sint reds);
Eterm erts_get_process_priority(erts_aint32_t state);
Eterm erts_set_process_priority(Process *p, Eterm prio);
@@ -1844,12 +1848,14 @@ int erts_resume_processes(ErtsProcList *);
void erts_deep_process_dump(fmtfn_t, void *);
Eterm erts_get_reader_groups_map(Process *c_p);
+Eterm erts_get_decentralized_counter_groups_map(Process *c_p);
Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
Uint erts_debug_nbalance(void);
#define ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS (1 << 0)
#define ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS (1 << 1)
+#define ERTS_DEBUG_WAIT_COMPLETED_AUX_WORK (1 << 2)
int erts_debug_wait_completed(Process *c_p, int flags);
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index ac5054ea10..71262061dd 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -100,16 +100,18 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
dump_binaries(to, to_arg, all_binaries);
}
-static void
-monitor_size(ErtsMonitor *mon, void *vsize)
+static int
+monitor_size(ErtsMonitor *mon, void *vsize, Sint reds)
{
*((Uint *) vsize) += erts_monitor_size(mon);
+ return 1;
}
-static void
-link_size(ErtsMonitor *lnk, void *vsize)
+static int
+link_size(ErtsMonitor *lnk, void *vsize, Sint reds)
{
*((Uint *) vsize) += erts_link_size(lnk);
+ return 1;
}
Uint erts_process_memory(Process *p, int include_sigs_in_transit)
@@ -119,12 +121,14 @@ Uint erts_process_memory(Process *p, int include_sigs_in_transit)
size += sizeof(Process);
- erts_link_tree_foreach(ERTS_P_LINKS(p),
- link_size, (void *) &size);
- erts_monitor_tree_foreach(ERTS_P_MONITORS(p),
- monitor_size, (void *) &size);
- erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p),
- monitor_size, (void *) &size);
+ if ((erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING) == 0) {
+ erts_link_tree_foreach(ERTS_P_LINKS(p),
+ link_size, (void *) &size);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(p),
+ monitor_size, (void *) &size);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p),
+ monitor_size, (void *) &size);
+ }
size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
if (p->abandoned_heap)
size += (p->hend - p->heap) * sizeof(Eterm);
@@ -189,11 +193,11 @@ static ERTS_INLINE void
dump_msg(fmtfn_t to, void *to_arg, ErtsMessage *mp)
{
if (ERTS_SIG_IS_MSG((ErtsSignal *) mp)) {
- Eterm mesg = ERL_MESSAGE_TERM(mp);
- if (is_value(mesg))
- dump_element(to, to_arg, mesg);
+ Eterm mesg;
+ if (ERTS_SIG_IS_INTERNAL_MSG(mp))
+ dump_element(to, to_arg, ERL_MESSAGE_TERM(mp));
else
- dump_dist_ext(to, to_arg, mp->data.dist_ext);
+ dump_dist_ext(to, to_arg, erts_get_dist_ext(mp->data.heap_frag));
mesg = ERL_MESSAGE_TOKEN(mp);
erts_print(to, to_arg, ":");
dump_element(to, to_arg, mesg);
@@ -265,6 +269,7 @@ dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep)
else {
byte *e;
size_t sz;
+ int i;
if (!(edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB))
erts_print(to, to_arg, "D0:");
@@ -274,8 +279,8 @@ dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep)
for (i = 0; i < edep->attab.size; i++)
dump_element(to, to_arg, edep->attab.atom[i]);
}
- sz = edep->ext_endp - edep->extp;
- e = edep->extp;
+ sz = edep->data->ext_endp - edep->data->extp;
+ e = edep->data->extp;
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
ASSERT(*e != VERSION_MAGIC);
sz++;
@@ -286,15 +291,19 @@ dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep)
erts_print(to, to_arg, "E%X:", sz);
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
byte sbuf[3];
- int i = 0;
+
+ i = 0;
sbuf[i++] = VERSION_MAGIC;
- while (i < sizeof(sbuf) && e < edep->ext_endp) {
+ while (i < sizeof(sbuf) && e < edep->data->ext_endp) {
sbuf[i++] = *e++;
}
erts_print_base64(to, to_arg, sbuf, i);
}
- erts_print_base64(to, to_arg, e, edep->ext_endp - e);
+ erts_print_base64(to, to_arg, e, edep->data->ext_endp - e);
+ for (i = 1; i < edep->data->frag_id; i++)
+ erts_print_base64(to, to_arg, edep->data[i].extp,
+ edep->data[i].ext_endp - edep->data[i].extp);
}
}
@@ -1005,6 +1014,10 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)
}
size = 1 + header_arity(w);
switch (w & _HEADER_SUBTAG_MASK) {
+ case FUN_SUBTAG:
+ ASSERT(((ErlFunThing*)(htop))->num_free == 0);
+ size += 1;
+ break;
case MAP_SUBTAG:
if (is_flatmap_header(w)) {
size += 1 + flatmap_get_size(htop);
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index 94f0247492..c30a684002 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -68,8 +68,11 @@ typedef struct {
Uint64 started_interval;
struct reg_proc *reg;
ErtsLink *links;
- ErtsMonitor *monitors;
+    /* Local target monitors; a doubly linked list that
+       contains the remote part of local monitors */
ErtsMonitor *lt_monitors;
+    /* Other monitors; a red-black tree */
+ ErtsMonitor *monitors;
} alive;
/* --- While being released --- */
diff --git a/erts/emulator/beam/erl_rbtree.h b/erts/emulator/beam/erl_rbtree.h
index e50abf5cec..ce401fa7e7 100644
--- a/erts/emulator/beam/erl_rbtree.h
+++ b/erts/emulator/beam/erl_rbtree.h
@@ -161,7 +161,7 @@
*
* - void <ERTS_RBT_PREFIX>_rbt_foreach(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg);
* Operate by calling the operator 'op' on each element.
* Order is undefined.
@@ -170,7 +170,7 @@
*
* - void <ERTS_RBT_PREFIX>_rbt_foreach_destroy(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg);
* Operate by calling the operator 'op' on each element.
* Order is undefined. Each element should be destroyed
@@ -180,39 +180,46 @@
*
* - int <ERTS_RBT_PREFIX>_rbt_foreach_yielding(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg,
* <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
- * Sint ylimit);
+ * Sint reds);
* Operate by calling the operator 'op' on each element.
* Order is undefined.
*
- * Yield when 'ylimit' elements has been processed. True is
- * returned when yielding, and false is returned when
- * the whole tree has been processed. The tree should not be
- * modified until all of it has been processed.
+ * Yield when 'reds' reductions have been processed. The 'op'
+ * function returns the number of reductions that each element
+ * took to process. The number of reductions remaining is returned;
+ * if 0 is returned, there are more elements left to be processed,
+ * while a value greater than 0 means the foreach has ended. The
+ * tree should not be modified until all of it has been processed.
*
* 'arg' is passed as argument to 'op'.
*
* - int <ERTS_RBT_PREFIX>_rbt_foreach_destroy_yielding(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg,
* <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
- * Sint ylimit);
+ * Sint reds);
* Operate by calling the operator 'op' on each element.
* Order is undefined. Each element should be destroyed
* by 'op'.
*
- * Yield when 'ylimit' elements has been processed. True is
- * returned when yielding, and false is returned when
- * the whole tree has been processed.
+ * Yield when 'reds' reductions have been processed. The 'op'
+ * function returns the number of reductions that each element
+ * took to process. The number of reductions remaining is returned;
+ * if 0 is returned, there are more elements left to be processed,
+ * while a value greater than 0 means the foreach has ended. The
+ * tree should not be modified until all of it has been processed.
*
* 'arg' is passed as argument to 'op'.
*
* - void <ERTS_RBT_PREFIX>_rbt_foreach_small(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg);
* Operate by calling the operator 'op' on each element from
* smallest towards larger elements.
@@ -221,7 +228,7 @@
*
* - void <ERTS_RBT_PREFIX>_rbt_foreach_large(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg);
* Operate by calling the operator 'op' on each element from
* largest towards smaller elements.
@@ -230,40 +237,46 @@
*
* - int <ERTS_RBT_PREFIX>_rbt_foreach_small_yielding(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg,
* <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
- * Sint ylimit);
+ * Sint reds);
* Operate by calling the operator 'op' on each element from
* smallest towards larger elements.
*
- * Yield when 'ylimit' elements has been processed. True is
- * returned when yielding, and false is returned when
- * the whole tree has been processed. The tree should not be
- * modified until all of it has been processed.
+ * Yield when 'reds' reductions have been processed. The 'op'
+ * function returns the number of reductions that each element
+ * took to process. The number of reductions remaining is returned;
+ * if 0 is returned, there are more elements left to be processed,
+ * while a value greater than 0 means the foreach has ended. The
+ * tree should not be modified until all of it has been processed.
*
* 'arg' is passed as argument to 'op'.
*
* - int <ERTS_RBT_PREFIX>_rbt_foreach_large_yielding(
* ERTS_RBT_T *tree,
- * void (*op)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
* void *arg,
* <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
- * Sint ylimit);
+ * Sint reds);
* Operate by calling the operator 'op' on each element from
* largest towards smaller elements.
*
- * Yield when 'ylimit' elements has been processed. True is
- * returned when yielding, and false is returned when
- * the whole tree has been processed. The tree should not be
- * modified until all of it has been processed.
+ * Yield when 'reds' reductions have been processed. The 'op'
+ * function returns the number of reductions that each element
+ * took to process. The number of reductions remaining is returned;
+ * if 0 is returned, there are more elements left to be processed,
+ * while a value greater than 0 means the foreach has ended. The
+ * tree should not be modified until all of it has been processed.
*
* 'arg' is passed as argument to 'op'.
*
* - void <ERTS_RBT_PREFIX>_rbt_foreach_small_destroy(
* ERTS_RBT_T **tree,
- * void (*op)(ERTS_RBT_T *, void *),
- * void (*destr)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
+ * int (*destr)(ERTS_RBT_T *, void *),
* void *arg);
* Operate by calling the operator 'op' on each element from
* smallest towards larger elements.
@@ -277,8 +290,8 @@
*
* - void <ERTS_RBT_PREFIX>_rbt_foreach_large_destroy(
* ERTS_RBT_T **tree,
- * void (*op)(ERTS_RBT_T *, void *),
- * void (*destr)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
+ * int (*destr)(ERTS_RBT_T *, void *),
* void *arg);
* Operate by calling the operator 'op' on each element from
* largest towards smaller elements.
@@ -292,11 +305,11 @@
*
* - int <ERTS_RBT_PREFIX>_rbt_foreach_small_destroy_yielding(
* ERTS_RBT_T **tree,
- * void (*op)(ERTS_RBT_T *, void *),
- * void (*destr)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
+ * int (*destr)(ERTS_RBT_T *, void *),
* void *arg,
* <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
- * Sint ylimit);
+ * Sint reds);
* Operate by calling the operator 'op' on each element from
* smallest towards larger elements.
*
@@ -305,20 +318,23 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. True is
- * returned when yielding, and false is returned when
- * the whole tree has been processed. The tree should not be
- * modified until all of it has been processed.
+ * Yield when 'reds' reductions have been processed. The 'op' and
+ * 'destr' functions return the number of reductions that each element
+ * took to process. The number of reductions remaining is returned;
+ * if 0 is returned, there are more elements left to be processed,
+ * while a value greater than 0 means the foreach has ended. The
+ * tree should not be modified until all of it has been processed.
*
 * 'arg' is passed as argument to 'op' and 'destr'.
*
* - int <ERTS_RBT_PREFIX>_rbt_foreach_large_destroy_yielding(
* ERTS_RBT_T **tree,
- * void (*op)(ERTS_RBT_T *, void *),
- * void (*destr)(ERTS_RBT_T *, void *),
+ * int (*op)(ERTS_RBT_T *, void *),
+ * int (*destr)(ERTS_RBT_T *, void *),
* void *arg,
* <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
- * Sint ylimit);
+ * Sint reds);
* Operate by calling the operator 'op' on each element from
* largest towards smaller elements.
*
@@ -327,10 +343,13 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. True is
- * returned when yielding, and false is returned when
- * the whole tree has been processed. The tree should not be
- * modified until all of it has been processed.
+ * Yield when 'reds' reductions have been processed. The 'op' and
+ * 'destr' functions return the number of reductions that each element
+ * took to process. The number of reductions remaining is returned;
+ * if 0 is returned, there are more elements left to be processed,
+ * while a value greater than 0 means the foreach has ended. The
+ * tree should not be modified until all of it has been processed.
*
 * 'arg' is passed as argument to 'op' and 'destr'.
*
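To make the contract above concrete, a sketch assuming a tree instantiated with ERTS_RBT_PREFIX mytree and a hypothetical element type: the 'op' reports one reduction per element, and the driver keeps calling with the same yield state until a positive value comes back:

    /* Hypothetical 'op': visit one element, report one reduction. */
    static int
    count_op(mytree_element *el, void *varg, Sint reds)
    {
        (void) el; (void) reds;
        (*(Sint *) varg)++;
        return 1;
    }

    static Sint
    count_all(mytree_element *root)
    {
        mytree_rbt_yield_state_t ys = ERTS_RBT_YIELD_STAT_INITER;
        Sint count = 0;

        /* 0 means the traversal yielded; a positive return means
         * the whole tree has been processed. */
        while (mytree_rbt_foreach_yielding(root, count_op, &count,
                                           &ys, 1000) == 0)
            ; /* in ERTS, the process would be rescheduled here */
        return count;
    }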
@@ -447,17 +466,6 @@
# define ERTS_RBT_API_INLINE__ ERTS_INLINE
#endif
-#ifndef ERTS_RBT_YIELD_STAT_INITER
-# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0}
-#endif
-#ifndef ERTS_RBT_YIELD_STAT_INIT
-# define ERTS_RBT_YIELD_STAT_INIT(YS) \
- do { \
- (YS)->x = NULL; \
- (YS)->up = 0; \
- } while (0)
-#endif
-
#define ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y) \
X ## Y
#define ERTS_RBT_CONCAT_MACRO_VALUES__(X, Y) \
@@ -470,8 +478,38 @@
typedef struct {
ERTS_RBT_T *x;
int up;
+#ifdef DEBUG
+ int debug_red_adj;
+#endif
} ERTS_RBT_YIELD_STATE_T__;
+#define ERTS_RBT_CALLBACK_FOREACH_FUNC(NAME) int (*NAME)(ERTS_RBT_T *, void *, Sint)
+
+#ifndef ERTS_RBT_YIELD_STAT_INITER
+# ifdef DEBUG
+# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0, CONTEXT_REDS}
+# else
+# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0}
+# endif
+#endif
+#ifndef ERTS_RBT_YIELD_STAT_INIT
+# define ERTS_RBT_YIELD_STAT_INIT__(YS) \
+ do { \
+ (YS)->x = NULL; \
+ (YS)->up = 0; \
+ } while (0)
+# ifdef DEBUG
+# define ERTS_RBT_YIELD_STAT_INIT(YS) \
+ do { \
+ ERTS_RBT_YIELD_STAT_INIT__(YS); \
+ (YS)->debug_red_adj = CONTEXT_REDS; \
+ } while(0)
+# else
+# define ERTS_RBT_YIELD_STAT_INIT(YS) ERTS_RBT_YIELD_STAT_INIT__(YS)
+# endif
+#endif
+
+
#define ERTS_RBT_FUNC__(Name) \
ERTS_RBT_CONCAT_MACRO_VALUES__(ERTS_RBT_PREFIX, _rbt_ ## Name)
@@ -1302,11 +1340,11 @@ ERTS_RBT_FUNC__(largest)(ERTS_RBT_T *root)
static ERTS_INLINE int
ERTS_RBT_FUNC__(foreach_unordered__)(ERTS_RBT_T **root,
int destroying,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg,
- int yielding,
+ int yielding,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
ERTS_RBT_T *c, *p, *x;
@@ -1314,13 +1352,17 @@ ERTS_RBT_FUNC__(foreach_unordered__)(ERTS_RBT_T **root,
if (yielding && ystate->x) {
x = ystate->x;
+#ifdef DEBUG
+ if (ystate->debug_red_adj > 0)
+ ystate->debug_red_adj -= 100;
+#endif
ERTS_RBT_ASSERT(ystate->up);
goto restart_up;
}
else {
x = *root;
if (!x)
- return 0;
+ return reds;
if (destroying)
*root = NULL;
}
@@ -1346,10 +1388,10 @@ ERTS_RBT_FUNC__(foreach_unordered__)(ERTS_RBT_T **root,
#ifdef ERTS_RBT_DEBUG
int cdir;
#endif
- if (yielding && ylimit-- <= 0) {
+ if (yielding && reds <= 0) {
ystate->x = x;
ystate->up = 1;
- return 1;
+ return 0;
}
restart_up:
@@ -1375,14 +1417,20 @@ ERTS_RBT_FUNC__(foreach_unordered__)(ERTS_RBT_T **root,
}
#endif
- (*op)(x, arg);
+ reds -= (*op)(x, arg, reds);
+#ifdef DEBUG
+ if (yielding)
+ reds -= ystate->debug_red_adj;
+#endif
if (!p) {
+ /* Done */
if (yielding) {
ystate->x = NULL;
ystate->up = 0;
+ return reds <= 0 ? 1 : reds;
}
- return 0; /* Done */
+ return 1;
}
c = ERTS_RBT_GET_RIGHT(p);
@@ -1407,20 +1455,26 @@ static ERTS_INLINE int
ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
int from_small,
int destroying,
- void (*op)(ERTS_RBT_T *, void *),
- void (*destroy)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(destroy),
void *arg,
- int yielding,
+ int yielding,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
ERTS_RBT_T *c, *p, *x;
ERTS_RBT_ASSERT(!yielding || ystate);
ERTS_RBT_ASSERT(!destroying || destroy);
+    ERTS_RBT_ASSERT(op);
if (yielding && ystate->x) {
x = ystate->x;
+#ifdef DEBUG
+ if (ystate->debug_red_adj > 0)
+ ystate->debug_red_adj -= 100;
+#endif
if (ystate->up)
goto restart_up;
else
@@ -1429,7 +1483,7 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
else {
x = *root;
if (!x)
- return 0;
+ return reds;
if (destroying)
*root = NULL;
}
@@ -1445,12 +1499,16 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
x = c;
}
- (*op)(x, arg);
+ reds -= (*op)(x, arg, reds);
+#ifdef DEBUG
+ if (yielding)
+ reds -= ystate->debug_red_adj;
+#endif
- if (yielding && --ylimit <= 0) {
+ if (yielding && reds <= 0) {
ystate->x = x;
ystate->up = 0;
- return 1;
+ return 0;
}
restart_down:
@@ -1472,12 +1530,16 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
? ERTS_RBT_GET_LEFT(p)
: ERTS_RBT_GET_RIGHT(p)) == x);
- (*op)(p, arg);
+ reds -= (*op)(p, arg, reds);
+#ifdef DEBUG
+ if (yielding)
+ reds -= ystate->debug_red_adj;
+#endif
- if (yielding && --ylimit <= 0) {
+ if (yielding && reds <= 0) {
ystate->x = x;
ystate->up = 1;
- return 1;
+ return 0;
restart_up:
p = ERTS_RBT_GET_PARENT(x);
}
@@ -1510,15 +1572,20 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
}
#endif
- (*destroy)(x, arg);
+ reds -= (*destroy)(x, arg, reds);
+#ifdef DEBUG
+ if (yielding)
+ reds -= ystate->debug_red_adj;
+#endif
}
if (!p) {
if (yielding) {
ystate->x = NULL;
ystate->up = 0;
+ return reds <= 0 ? 1 : reds;
}
- return 0; /* Done */
+ return 1; /* Done */
}
x = p;
}
@@ -1531,7 +1598,7 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
static ERTS_RBT_API_INLINE__ void
ERTS_RBT_FUNC__(foreach)(ERTS_RBT_T *root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg)
{
(void) ERTS_RBT_FUNC__(foreach_unordered__)(&root, 0, op, arg,
@@ -1544,7 +1611,7 @@ ERTS_RBT_FUNC__(foreach)(ERTS_RBT_T *root,
static ERTS_RBT_API_INLINE__ void
ERTS_RBT_FUNC__(foreach_small)(ERTS_RBT_T *root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg)
{
(void) ERTS_RBT_FUNC__(foreach_ordered__)(&root, 1, 0,
@@ -1558,7 +1625,7 @@ ERTS_RBT_FUNC__(foreach_small)(ERTS_RBT_T *root,
static ERTS_RBT_API_INLINE__ void
ERTS_RBT_FUNC__(foreach_large)(ERTS_RBT_T *root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg)
{
(void) ERTS_RBT_FUNC__(foreach_ordered__)(&root, 0, 0,
@@ -1572,13 +1639,13 @@ ERTS_RBT_FUNC__(foreach_large)(ERTS_RBT_T *root,
static ERTS_RBT_API_INLINE__ int
ERTS_RBT_FUNC__(foreach_yielding)(ERTS_RBT_T *root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
return ERTS_RBT_FUNC__(foreach_unordered__)(&root, 0, op, arg,
- 1, ystate, ylimit);
+ 1, ystate, reds);
}
#endif /* ERTS_RBT_WANT_FOREACH_YIELDING */
@@ -1587,14 +1654,14 @@ ERTS_RBT_FUNC__(foreach_yielding)(ERTS_RBT_T *root,
static ERTS_RBT_API_INLINE__ int
ERTS_RBT_FUNC__(foreach_small_yielding)(ERTS_RBT_T *root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
return ERTS_RBT_FUNC__(foreach_ordered__)(&root, 1, 0,
op, NULL, arg,
- 1, ystate, ylimit);
+ 1, ystate, reds);
}
#endif /* ERTS_RBT_WANT_FOREACH_SMALL_YIELDING */
@@ -1603,14 +1670,14 @@ ERTS_RBT_FUNC__(foreach_small_yielding)(ERTS_RBT_T *root,
static ERTS_RBT_API_INLINE__ int
ERTS_RBT_FUNC__(foreach_large_yielding)(ERTS_RBT_T *root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
return ERTS_RBT_FUNC__(foreach_ordered__)(&root, 0, 0,
op, NULL, arg,
- 1, ystate, ylimit);
+ 1, ystate, reds);
}
#endif /* ERTS_RBT_WANT_FOREACH_LARGE_YIELDING */
@@ -1619,11 +1686,11 @@ ERTS_RBT_FUNC__(foreach_large_yielding)(ERTS_RBT_T *root,
static ERTS_RBT_API_INLINE__ void
ERTS_RBT_FUNC__(foreach_destroy)(ERTS_RBT_T **root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg)
{
(void) ERTS_RBT_FUNC__(foreach_unordered__)(root, 1, op, arg,
- 0, NULL, 0);
+ 0, NULL, 0);
}
#endif /* ERTS_RBT_WANT_FOREACH_DESTROY */
@@ -1632,8 +1699,8 @@ ERTS_RBT_FUNC__(foreach_destroy)(ERTS_RBT_T **root,
static ERTS_RBT_API_INLINE__ void
ERTS_RBT_FUNC__(foreach_small_destroy)(ERTS_RBT_T **root,
- void (*op)(ERTS_RBT_T *, void *),
- void (*destr)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(destr),
void *arg)
{
(void) ERTS_RBT_FUNC__(foreach_ordered__)(root, 1, 1,
@@ -1647,8 +1714,8 @@ ERTS_RBT_FUNC__(foreach_small_destroy)(ERTS_RBT_T **root,
static ERTS_RBT_API_INLINE__ void
ERTS_RBT_FUNC__(foreach_large_destroy)(ERTS_RBT_T **root,
- void (*op)(ERTS_RBT_T *, void *),
- void (*destr)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(destr),
void *arg)
{
(void) ERTS_RBT_FUNC__(foreach_ordered__)(root, 0, 1,
@@ -1662,13 +1729,13 @@ ERTS_RBT_FUNC__(foreach_large_destroy)(ERTS_RBT_T **root,
static ERTS_RBT_API_INLINE__ int
ERTS_RBT_FUNC__(foreach_destroy_yielding)(ERTS_RBT_T **root,
- void (*op)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
void *arg,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
return ERTS_RBT_FUNC__(foreach_unordered__)(root, 1, op, arg,
- 1, ystate, ylimit);
+ 1, ystate, reds);
}
#endif /* ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING */
@@ -1677,15 +1744,15 @@ ERTS_RBT_FUNC__(foreach_destroy_yielding)(ERTS_RBT_T **root,
static ERTS_RBT_API_INLINE__ int
ERTS_RBT_FUNC__(foreach_small_destroy_yielding)(ERTS_RBT_T **root,
- void (*op)(ERTS_RBT_T *, void *),
- void (*destr)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(destr),
void *arg,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
return ERTS_RBT_FUNC__(foreach_ordered__)(root, 1, 1,
op, destr, arg,
- 1, ystate, ylimit);
+ 1, ystate, reds);
}
#endif /* ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING */
@@ -1694,15 +1761,15 @@ ERTS_RBT_FUNC__(foreach_small_destroy_yielding)(ERTS_RBT_T **root,
static ERTS_RBT_API_INLINE__ int
ERTS_RBT_FUNC__(foreach_large_destroy_yielding)(ERTS_RBT_T **root,
- void (*op)(ERTS_RBT_T *, void *),
- void (*destr)(ERTS_RBT_T *, void *),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(op),
+ ERTS_RBT_CALLBACK_FOREACH_FUNC(destr),
void *arg,
ERTS_RBT_YIELD_STATE_T__ *ystate,
- Sint ylimit)
+ Sint reds)
{
return ERTS_RBT_FUNC__(foreach_ordered__)(root, 0, 1,
op, destr, arg,
- 1, ystate, ylimit);
+ 1, ystate, reds);
}
#endif /* ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING */
@@ -1855,6 +1922,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *n)
#ifdef ERTS_RBT_UNDEF
# undef ERTS_RBT_PREFIX
# undef ERTS_RBT_T
+# undef ERTS_RBT_CALLBACK_FOREACH_FUNC
# undef ERTS_RBT_KEY_T
# undef ERTS_RBT_FLAGS_T
# undef ERTS_RBT_INIT_EMPTY_TNODE
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 29c698e34f..d26ea19494 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1911,8 +1911,8 @@ typedef struct {
ErtsTimeOffsetMonitorInfo *to_mon_info;
} ErtsTimeOffsetMonitorContext;
-static void
-save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt)
+static int
+save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt, Sint reds)
{
ErtsTimeOffsetMonitorContext *cntxt;
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
@@ -1935,7 +1935,7 @@ save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt)
cntxt->to_mon_info[mix].ref
= make_internal_ref(&cntxt->to_mon_info[mix].heap[0]);
-
+ return 1;
}
static void
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ae7084b7f4..c85a7df5ec 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -635,9 +635,11 @@ write_sys_msg_to_port(Eterm unused_to,
Eterm message) {
byte *buffer;
byte *ptr;
- unsigned size;
+ Uint size;
+
+ if (erts_encode_ext_size(message, &size) != ERTS_EXT_SZ_OK)
+ erts_exit(ERTS_ERROR_EXIT, "Internal error: System limit\n");
- size = erts_encode_ext_size(message);
buffer = (byte *) erts_alloc(ERTS_ALC_T_TMP, size);
ptr = buffer;
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index d225916ac5..1d6869a7cd 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -1358,11 +1358,9 @@ Uint erts_atom_to_string_length(Eterm atom)
else {
byte* err_pos;
Uint num_chars;
-#ifdef DEBUG
int ares =
-#endif
erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL);
- ASSERT(ares == ERTS_UTF8_OK);
+ ASSERT(ares == ERTS_UTF8_OK); (void)ares;
return num_chars;
}
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 880febba8b..430ac305c5 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -69,7 +69,6 @@ int erts_fit_in_bits_int32(Sint32);
int erts_fit_in_bits_uint(Uint);
Sint erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
-Uint32 block_hash(byte *, unsigned, Uint32);
Uint32 make_hash2(Eterm);
Uint32 make_hash(Eterm);
Uint32 make_internal_hash(Eterm, Uint32 salt);
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index d37c2940c4..e623148587 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -41,8 +41,8 @@
#define MAX_REG 1024 /* Max number of x(N) registers used */
/*
- * The new arithmetic operations need some extra X registers in the register array.
- * so does the gc_bif's (i_gc_bif3 need 3 extra).
+ * The new trapping length/1 implementation needs 3 extra registers in the
+ * register array.
*/
#define ERTS_X_REGS_ALLOCATED (MAX_REG+3)
@@ -67,9 +67,10 @@
(unsigned long)HEAP_TOP(p),(sz),__FILE__,__LINE__)), \
*/
# ifdef CHECK_FOR_HOLES
-# define INIT_HEAP_MEM(p,sz) erts_set_hole_marker(HEAP_TOP(p), (sz))
+Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
+# define INIT_HEAP_MEM(p,sz) erts_set_hole_marker(p, (sz))
# else
-# define INIT_HEAP_MEM(p,sz) sys_memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*))
+# define INIT_HEAP_MEM(p,sz) sys_memset(p,0x01,(sz)*sizeof(Eterm*))
# endif
#else
# define INIT_HEAP_MEM(p,sz) ((void)0)
@@ -91,7 +92,7 @@
ErtsHAllocLockCheck(p), \
(IS_FORCE_HEAP_FRAGS || (((HEAP_LIMIT(p) - HEAP_TOP(p)) < (sz))) \
? erts_heap_alloc((p),(sz),(xtra)) \
- : (INIT_HEAP_MEM(p,sz), \
+ : (INIT_HEAP_MEM(HEAP_TOP(p),sz), \
HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
#define HAlloc(P, SZ) HAllocX(P,SZ,0)
@@ -146,6 +147,21 @@
(HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
#endif
+/*
+ * Always allocate in a heap fragment, never on the heap.
+ */
+#if defined(VALGRIND)
+/* Running under valgrind, allocate exactly as much as needed. */
+# define HeapFragOnlyAlloc(p, sz) \
+ (ASSERT((sz) >= 0), \
+ ErtsHAllocLockCheck(p), \
+ erts_heap_alloc((p),(sz),0))
+#else
+# define HeapFragOnlyAlloc(p, sz) \
+ (ASSERT((sz) >= 0), \
+ ErtsHAllocLockCheck(p), \
+ erts_heap_alloc((p),(sz),512))
+#endif
/*
* Description for each instruction (defined here because the name and
@@ -167,8 +183,6 @@ extern const int num_instructions; /* Number of instruction in opc[]. */
extern Uint erts_instr_count[];
-extern int tuple_module_apply;
-
/* some constants for various table sizes etc */
#define ATOM_TEXT_SIZE 32768 /* Increment for allocating atom text space */
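HeapFragOnlyAlloc() above always routes through erts_heap_alloc(), which places the allocation in a heap fragment; the 512 extra words leave slack for subsequent fragment-only allocations, while the valgrind variant sizes the fragment exactly so that overruns are caught. A hedged usage sketch (the 2-tuple and the `result` term are illustrative only):

    /* Sketch: build a 2-tuple guaranteed to live in a heap fragment
     * rather than on the process heap proper. */
    Eterm *hp = HeapFragOnlyAlloc(p, 3);    /* header + 2 elements */
    Eterm tup = TUPLE2(hp, am_ok, result);  /* 'result' assumed in scope */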
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 1ded5f031c..ec67ab2aed 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -112,13 +112,13 @@ static byte* dec_pid(ErtsDistExternal *, ErtsHeapFactory*, byte*, Eterm*, byte t
static Sint decoded_size(byte *ep, byte* endp, int internal_tags, struct B2TContext_t*);
static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1);
-static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
- Binary *context_b);
+static Eterm erts_term_to_binary_int(Process* p, Eterm Term, Eterm opts, int level,
+ Uint flags, Binary *context_b);
static Uint encode_size_struct2(ErtsAtomCacheMap *, Eterm, unsigned);
struct TTBSizeContext_;
-static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acmp, Eterm obj,
- unsigned dflags, Sint *reds, Uint *res);
+static ErtsExtSzRes encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acmp,
+ Eterm obj, unsigned dflags, Sint *reds, Uint *res);
static Export binary_to_term_trap_export;
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1);
@@ -262,19 +262,12 @@ erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags)
if (acmp) {
int long_atoms = 0; /* !0 if one or more atoms are longer than 255. */
int i;
- int sz;
- int fix_sz
- = 1 /* VERSION_MAGIC */
- + 1 /* DIST_HEADER */
- + 1 /* dist header flags */
- + 1 /* number of internal cache entries */
- ;
+ int sz = 0;
int min_sz;
ASSERT(dflags & DFLAG_UTF8_ATOMS);
ASSERT(acmp->hdr_sz < 0);
/* Make sure cache update instructions fit */
- min_sz = fix_sz+(2+4)*acmp->sz;
- sz = fix_sz;
+ min_sz = (2+4)*acmp->sz;
for (i = 0; i < acmp->sz; i++) {
Atom *a;
Eterm atom;
@@ -302,17 +295,28 @@ erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags)
}
Uint
-erts_encode_ext_dist_header_size(ErtsAtomCacheMap *acmp)
+erts_encode_ext_dist_header_size(ErtsAtomCacheMap *acmp, Uint fragments)
{
if (!acmp)
return 0;
else {
+ int fix_sz
+ = 1 /* VERSION_MAGIC */
+ + 1 /* DIST_HEADER */
+ + 1 /* dist header flags */
+ + 1 /* number of internal cache entries */
+ ;
ASSERT(acmp->hdr_sz >= 0);
- return acmp->hdr_sz;
+ if (fragments > 1)
+ fix_sz += 8 /* sequence id */
+ + 8 /* number of fragments */
+ ;
+ return fix_sz + acmp->hdr_sz;
}
}
-byte *erts_encode_ext_dist_header_setup(byte *ctl_ext, ErtsAtomCacheMap *acmp)
+byte *erts_encode_ext_dist_header_setup(byte *ctl_ext, ErtsAtomCacheMap *acmp,
+ Uint fragments, Eterm from)
{
/* The maximum number of atoms must be less than the maximum value of a
   32-bit unsigned integer. The check is done in erl_init.c, in the
   erl_start function. */
@@ -346,12 +350,37 @@ byte *erts_encode_ext_dist_header_setup(byte *ctl_ext, ErtsAtomCacheMap *acmp)
put_int8(acmp->sz, ep);
--ep;
put_int8(dist_hdr_flags, ep);
- *--ep = DIST_HEADER;
- *--ep = VERSION_MAGIC;
+ if (fragments > 1) {
+ ASSERT(is_pid(from));
+ ep -= 8;
+ put_int64(fragments, ep);
+ ep -= 8;
+ put_int64(from, ep);
+ *--ep = DIST_FRAG_HEADER;
+ } else {
+ *--ep = DIST_HEADER;
+ }
+ *--ep = VERSION_MAGIC;
return ep;
}
}
+byte *erts_encode_ext_dist_header_fragment(byte **hdrpp,
+ Uint fragment,
+ Eterm from)
+{
+ byte *ep = *hdrpp, *start = ep;
+ ASSERT(is_pid(from));
+ *ep++ = VERSION_MAGIC;
+ *ep++ = DIST_FRAG_CONT;
+ put_int64(from, ep);
+ ep += 8;
+ put_int64(fragment, ep);
+ ep += 8;
+ *hdrpp = ep;
+ return start;
+}
+
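On the wire, both fragment tags carry the same 16 bytes of metadata: a
64-bit sequence id (seeded from the sending pid in the setup function
above) followed by a 64-bit fragment id. A hedged decode sketch that
mirrors erts_prepare_dist_ext() below; skip_frag_cont_header() is a
hypothetical helper and buffer-length checks are omitted:

    static byte *skip_frag_cont_header(byte *ep, Uint64 *seq_id, Uint64 *frag_id)
    {
        if (ep[0] != VERSION_MAGIC || ep[1] != DIST_FRAG_CONT)
            return NULL;                  /* not a continuation fragment */
        *seq_id = get_int64(&ep[2]);      /* same id for all fragments */
        *frag_id = get_int64(&ep[2 + 8]); /* per-fragment counter */
        return ep + 1 + 1 + 8 + 8;        /* encoded payload follows */
    }
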
#define PASS_THROUGH 'p'
@@ -365,7 +394,8 @@ Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf* ob,
int ci, sz;
byte dist_hdr_flags;
int long_atoms;
- register byte *ep = ob->extp;
+ Uint64 seq_id = 0, frag_id = 0;
+ register byte *ep = ob->hdrp ? ob->hdrp : ob->extp;
ASSERT(dflags & DFLAG_UTF8_ATOMS);
/*
@@ -416,7 +446,7 @@ Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf* ob,
}
goto done;
}
- else if (ep[1] != DIST_HEADER) {
+ else if (ep[1] != DIST_HEADER && ep[1] != DIST_FRAG_HEADER && ep[1] != DIST_FRAG_CONT) {
ASSERT(ep[1] == SMALL_TUPLE_EXT || ep[1] == LARGE_TUPLE_EXT);
ASSERT(!(dflags & DFLAG_DIST_HDR_ATOM_CACHE));
/* Node without atom cache, 'pass through' needed */
@@ -424,6 +454,17 @@ Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf* ob,
goto done;
}
+ if (ep[1] == DIST_FRAG_CONT) {
+ ep = ob->extp;
+ goto done;
+ } else if (ep[1] == DIST_FRAG_HEADER) {
+ /* skip the seq id and frag id */
+ seq_id = get_int64(&ep[2]);
+ ep += 8;
+ frag_id = get_int64(&ep[2]);
+ ep += 8;
+ }
+
dist_hdr_flags = ep[2];
long_atoms = ERTS_DIST_HDR_LONG_ATOMS_FLG & ((int) dist_hdr_flags);
@@ -546,57 +587,66 @@ Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf* ob,
}
--ep;
put_int8(ci, ep);
- *--ep = DIST_HEADER;
+ if (seq_id) {
+ ep -= 8;
+ put_int64(frag_id, ep);
+ ep -= 8;
+ put_int64(seq_id, ep);
+ *--ep = DIST_FRAG_HEADER;
+ } else {
+ *--ep = DIST_HEADER;
+ }
*--ep = VERSION_MAGIC;
done:
ob->extp = ep;
- ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp);
+ ASSERT((byte*)ob->bin->orig_bytes <= ob->extp && ob->extp < ob->ext_endp);
return reds < 0 ? 0 : reds;
}
-int erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp,
- Uint* szp)
+ErtsExtSzRes
+erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp, Uint* szp)
{
Uint sz;
- if (encode_size_struct_int(NULL, acmp, term, flags, NULL, &sz)) {
- return -1;
- } else {
+ ErtsExtSzRes res = encode_size_struct_int(NULL, acmp, term, flags, NULL, &sz);
+ if (res == ERTS_EXT_SZ_OK) {
#ifndef ERTS_DEBUG_USE_DIST_SEP
if (!(flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC)))
#endif
sz++ /* VERSION_MAGIC */;
*szp += sz;
- return 0;
}
+ return res;
}
-int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp)
+ErtsExtSzRes
+erts_encode_dist_ext_size_ctx(Eterm term, ErtsDSigSendContext *ctx, Uint* szp)
{
Uint sz;
- if (encode_size_struct_int(&ctx->u.sc, ctx->acmp, term, ctx->flags, &ctx->reds, &sz)) {
- return -1;
- } else {
+ ErtsExtSzRes res = encode_size_struct_int(&ctx->u.sc, ctx->acmp, term,
+ ctx->flags, &ctx->reds, &sz);
+ if (res == ERTS_EXT_SZ_OK) {
#ifndef ERTS_DEBUG_USE_DIST_SEP
if (!(ctx->flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC)))
#endif
sz++ /* VERSION_MAGIC */;
*szp += sz;
- return 0;
}
+ return res;
}
-Uint erts_encode_ext_size(Eterm term)
+ErtsExtSzRes erts_encode_ext_size_2(Eterm term, unsigned dflags, Uint *szp)
{
- return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS)
- + 1 /* VERSION_MAGIC */;
+ ErtsExtSzRes res = encode_size_struct_int(NULL, NULL, term, dflags,
+ NULL, szp);
+ (*szp)++ /* VERSION_MAGIC */;
+ return res;
}
-Uint erts_encode_ext_size_2(Eterm term, unsigned dflags)
+ErtsExtSzRes erts_encode_ext_size(Eterm term, Uint *szp)
{
- return encode_size_struct2(NULL, term, dflags)
- + 1 /* VERSION_MAGIC */;
+ return erts_encode_ext_size_2(term, TERM_TO_BINARY_DFLAGS, szp);
}
Uint erts_encode_ext_size_ets(Eterm term)
@@ -635,56 +685,110 @@ byte* erts_encode_ext_ets(Eterm term, byte *ep, struct erl_off_heap_header** off
off_heap);
}
-ErtsDistExternal *
-erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
+
+static Uint
+dist_ext_size(ErtsDistExternal *edep)
{
- size_t align_sz;
- size_t dist_ext_sz;
- size_t ext_sz;
- byte *ep;
- ErtsDistExternal *new_edep;
+ Uint sz = sizeof(ErtsDistExternal);
- dist_ext_sz = ERTS_DIST_EXT_SIZE(edep);
- ASSERT(edep->ext_endp && edep->extp);
- ASSERT(edep->ext_endp >= edep->extp);
- ext_sz = edep->ext_endp - edep->extp;
+ ASSERT(edep->data->ext_endp && edep->data->extp);
+ ASSERT(edep->data->ext_endp >= edep->data->extp);
- align_sz = ERTS_EXTRA_DATA_ALIGN_SZ(dist_ext_sz + ext_sz);
+ if (edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) {
+ ASSERT(0 <= edep->attab.size \
+ && edep->attab.size <= ERTS_ATOM_CACHE_SIZE);
+ sz -= sizeof(Eterm)*(ERTS_ATOM_CACHE_SIZE - edep->attab.size);
+ } else {
+ sz -= sizeof(ErtsAtomTranslationTable);
+ }
+ ASSERT(sz % 4 == 0);
+ return sz;
+}
- new_edep = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA,
- dist_ext_sz + ext_sz + align_sz + xsize);
+Uint
+erts_dist_ext_size(ErtsDistExternal *edep)
+{
+ Uint sz = dist_ext_size(edep);
+ sz += 4; /* may need to pad to 8-byte-align ErtsDistExternalData */
+ sz += edep->data[0].frag_id * sizeof(ErtsDistExternalData);
+ return sz;
+}
+
+Uint
+erts_dist_ext_data_size(ErtsDistExternal *edep)
+{
+ Uint sz = 0, i;
+ for (i = 0; i < edep->data->frag_id; i++)
+ sz += edep->data[i].ext_endp - edep->data[i].extp;
+ return sz;
+}
+
+void
+erts_dist_ext_frag(ErtsDistExternalData *ede_datap, ErtsDistExternal *edep)
+{
+ ErtsDistExternalData *new_ede_datap = &edep->data[edep->data->frag_id - ede_datap->frag_id];
+ sys_memcpy(new_ede_datap, ede_datap, sizeof(ErtsDistExternalData));
+
+ /* If the data is not backed by a binary, we create one here to keep
+ things simple. Only custom distribution drivers should use lists. */
+ if (new_ede_datap->binp == NULL) {
+ size_t ext_sz = ede_datap->ext_endp - ede_datap->extp;
+ new_ede_datap->binp = erts_bin_nrml_alloc(ext_sz);
+ sys_memcpy(new_ede_datap->binp->orig_bytes, (void *) ede_datap->extp, ext_sz);
+ new_ede_datap->extp = (byte*)new_ede_datap->binp->orig_bytes;
+ new_ede_datap->ext_endp = (byte*)new_ede_datap->binp->orig_bytes + ext_sz;
+ } else {
+ erts_refc_inc(&new_ede_datap->binp->intern.refc, 2);
+ }
+}
+
+void
+erts_make_dist_ext_copy(ErtsDistExternal *edep, ErtsDistExternal *new_edep)
+{
+ size_t dist_ext_sz = dist_ext_size(edep);
+ byte *ep;
ep = (byte *) new_edep;
sys_memcpy((void *) ep, (void *) edep, dist_ext_sz);
+ erts_ref_dist_entry(new_edep->dep);
+
ep += dist_ext_sz;
- if (new_edep->dep)
- erts_ref_dist_entry(new_edep->dep);
- new_edep->extp = ep;
- new_edep->ext_endp = ep + ext_sz;
- new_edep->heap_size = -1;
- sys_memcpy((void *) ep, (void *) edep->extp, ext_sz);
- return new_edep;
+ ep += (UWord)ep & 4; /* 8-byte alignment for ErtsDistExternalData */
+ ASSERT((UWord)ep % 8 == 0);
+
+ new_edep->data = (ErtsDistExternalData*)ep;
+ sys_memzero(new_edep->data, sizeof(ErtsDistExternalData) * edep->data->frag_id);
+ new_edep->data->frag_id = edep->data->frag_id;
+ erts_dist_ext_frag(edep->data, new_edep);
}
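
The new copy API writes into caller-provided memory sized by
erts_dist_ext_size(): the attab-trimmed header, up to 4 padding bytes for
alignment, then one ErtsDistExternalData slot per fragment. A hedged
caller sketch (the allocator type comes from the code this replaces;
real callers may instead embed the copy in a larger allocation such as a
signal's heap fragment):

    Uint sz = erts_dist_ext_size(edep);
    ErtsDistExternal *copy = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, sz);
    erts_make_dist_ext_copy(edep, copy);
    /* remaining fragment slots are zeroed and are filled in through
       erts_dist_ext_frag() as the fragments arrive */
    erts_free_dist_ext_copy(copy);             /* drop binary/dist refs */
    erts_free(ERTS_ALC_T_EXT_TERM_DATA, copy); /* then the memory */
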
-int
+void
+erts_free_dist_ext_copy(ErtsDistExternal *edep)
+{
+ int i;
+ erts_deref_dist_entry(edep->dep);
+ for (i = 0; i < edep->data->frag_id; i++)
+ if (edep->data[i].binp)
+ erts_bin_release(edep->data[i].binp);
+}
+
+ErtsPrepDistExtRes
erts_prepare_dist_ext(ErtsDistExternal *edep,
byte *ext,
Uint size,
+ Binary *binp,
DistEntry *dep,
Uint32 conn_id,
ErtsAtomCache *cache)
{
register byte *ep;
- edep->heap_size = -1;
- edep->flags = 0;
- edep->dep = dep;
-
ASSERT(dep);
erts_de_rlock(dep);
ASSERT(dep->flags & DFLAG_UTF8_ATOMS);
+
if ((dep->state != ERTS_DE_STATE_CONNECTED &&
dep->state != ERTS_DE_STATE_PENDING)
|| dep->connection_id != conn_id) {
@@ -697,7 +801,7 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
ext++;
size--;
}
- edep->ext_endp = ext + size;
+
ep = ext;
if (size < 2)
@@ -713,16 +817,33 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
goto fail;
}
+ edep->heap_size = -1;
+ edep->flags = 0;
+ edep->dep = dep;
+ edep->connection_id = conn_id;
+ edep->data->ext_endp = ext+size;
+ edep->data->binp = binp;
+ edep->data->seq_id = 0;
+ edep->data->frag_id = 1;
+
if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
edep->flags |= ERTS_DIST_EXT_DFLAG_HDR;
- edep->connection_id = dep->connection_id;
-
- if (ep[1] != DIST_HEADER) {
+ if (ep[1] != DIST_HEADER && ep[1] != DIST_FRAG_HEADER && ep[1] != DIST_FRAG_CONT) {
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR)
goto bad_hdr;
edep->attab.size = 0;
- edep->extp = ext;
+ edep->data->extp = ext;
+ }
+ else if (ep[1] == DIST_FRAG_CONT) {
+ if (!(dep->flags & DFLAG_FRAGMENTS))
+ goto bad_hdr;
+ edep->attab.size = 0;
+ edep->data->extp = ext + 1 + 1 + 8 + 8;
+ edep->data->seq_id = get_int64(&ep[2]);
+ edep->data->frag_id = get_int64(&ep[2+8]);
+ erts_de_runlock(dep);
+ return ERTS_PREP_DIST_EXT_FRAG_CONT;
}
else {
int tix;
@@ -731,9 +852,17 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
if (!(edep->flags & ERTS_DIST_EXT_DFLAG_HDR))
goto bad_hdr;
+ if (ep[1] == DIST_FRAG_HEADER) {
+ if (!(dep->flags & DFLAG_FRAGMENTS))
+ goto bad_hdr;
+ edep->data->seq_id = get_int64(&ep[2]);
+ edep->data->frag_id = get_int64(&ep[2+8]);
+ ep += 16;
+ }
+
#undef CHKSIZE
#define CHKSIZE(SZ) \
- do { if ((SZ) > edep->ext_endp - ep) goto bad_hdr; } while(0)
+ do { if ((SZ) > edep->data->ext_endp - ep) goto bad_hdr; } while(0)
CHKSIZE(1+1+1);
ep += 2;
@@ -863,7 +992,7 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
#endif
}
}
- edep->extp = ep;
+ edep->data->extp = ep;
#ifdef ERTS_DEBUG_USE_DIST_SEP
if (*ep != VERSION_MAGIC)
goto bad_hdr;
@@ -888,7 +1017,7 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
erts_this_node->sysname,
edep->dep->sysname,
dist_entry_channel_no(edep->dep));
- for (ep = ext; ep < edep->ext_endp; ep++)
+ for (ep = ext; ep < edep->data->ext_endp; ep++)
erts_dsprintf(dsbufp, ep != ext ? ",%b8u" : "<<%b8u", *ep);
erts_dsprintf(dsbufp, ">>");
erts_send_warning_to_logger_nogl(dsbufp);
@@ -913,9 +1042,9 @@ bad_dist_ext(ErtsDistExternal *edep)
erts_this_node->sysname,
dep->sysname,
dist_entry_channel_no(dep));
- for (ep = edep->extp; ep < edep->ext_endp; ep++)
+ for (ep = edep->data->extp; ep < edep->data->ext_endp; ep++)
erts_dsprintf(dsbufp,
- ep != edep->extp ? ",%b8u" : "<<...,%b8u",
+ ep != edep->data->extp ? ",%b8u" : "<<...,%b8u",
*ep);
erts_dsprintf(dsbufp, ">>\n");
erts_dsprintf(dsbufp, "ATOM_CACHE_REF translations: ");
@@ -933,30 +1062,32 @@ bad_dist_ext(ErtsDistExternal *edep)
}
Sint
-erts_decode_dist_ext_size(ErtsDistExternal *edep)
+erts_decode_dist_ext_size(ErtsDistExternal *edep, int kill_connection)
{
Sint res;
byte *ep;
- if (edep->extp >= edep->ext_endp)
+
+ if (edep->data->extp >= edep->data->ext_endp)
goto fail;
#ifndef ERTS_DEBUG_USE_DIST_SEP
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
- if (*edep->extp == VERSION_MAGIC)
+ if (*edep->data->extp == VERSION_MAGIC)
goto fail;
- ep = edep->extp;
+ ep = edep->data->extp;
}
else
#endif
{
- if (*edep->extp != VERSION_MAGIC)
+ if (*edep->data->extp != VERSION_MAGIC)
goto fail;
- ep = edep->extp+1;
+ ep = edep->data->extp+1;
}
- res = decoded_size(ep, edep->ext_endp, 0, NULL);
+ res = decoded_size(ep, edep->data->ext_endp, 0, NULL);
if (res >= 0)
return res;
fail:
- bad_dist_ext(edep);
+ if (kill_connection)
+ bad_dist_ext(edep);
return -1;
}
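
With the new kill_connection argument, a decode failure only calls
bad_dist_ext() (logging the garbage and taking the connection down) when
the caller asks for it. A hypothetical non-fatal caller:

    Sint hsz = erts_decode_dist_ext_size(edep, 0); /* 0: keep connection */
    if (hsz < 0) {
        /* Corrupt payload: drop just this message. Passing 1 instead
         * restores the old behaviour of closing the connection. */
    }
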
@@ -982,12 +1113,15 @@ Sint erts_decode_ext_size_ets(byte *ext, Uint size)
*/
Eterm
erts_decode_dist_ext(ErtsHeapFactory* factory,
- ErtsDistExternal *edep)
+ ErtsDistExternal *edep,
+ int kill_connection)
{
Eterm obj;
- byte* ep = edep->extp;
+ byte* ep;
- if (ep >= edep->ext_endp)
+ ep = edep->data->extp;
+
+ if (ep >= edep->data->ext_endp)
goto error;
#ifndef ERTS_DEBUG_USE_DIST_SEP
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
@@ -1005,14 +1139,15 @@ erts_decode_dist_ext(ErtsHeapFactory* factory,
if (!ep)
goto error;
- edep->extp = ep;
+ edep->data->extp = ep;
return obj;
error:
erts_factory_undo(factory);
- bad_dist_ext(edep);
+ if (kill_connection)
+ bad_dist_ext(edep);
return THE_NON_VALUE;
}
@@ -1057,6 +1192,7 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
Eterm res;
Sint hsz;
ErtsDistExternal ede;
+ ErtsDistExternalData ede_data;
Eterm *tp;
Eterm real_bin;
Uint offset;
@@ -1069,7 +1205,8 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
ede.flags = ERTS_DIST_EXT_ATOM_TRANS_TAB;
ede.dep = NULL;
ede.heap_size = -1;
-
+ ede.data = &ede_data;
+
if (is_not_tuple(BIF_ARG_1))
goto badarg;
tp = tuple_val(BIF_ARG_1);
@@ -1094,15 +1231,15 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
if (bitsize != 0)
goto badarg;
- ede.extp = binary_bytes(real_bin)+offset;
- ede.ext_endp = ede.extp + size;
+ ede.data->extp = binary_bytes(real_bin)+offset;
+ ede.data->ext_endp = ede.data->extp + size;
- hsz = erts_decode_dist_ext_size(&ede);
+ hsz = erts_decode_dist_ext_size(&ede, 1);
if (hsz < 0)
goto badarg;
erts_factory_proc_prealloc_init(&factory, BIF_P, hsz);
- res = erts_decode_dist_ext(&factory, &ede);
+ res = erts_decode_dist_ext(&factory, &ede, 1);
erts_factory_close(&factory);
if (is_value(res))
@@ -1117,9 +1254,22 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
{
Eterm *tp = tuple_val(BIF_ARG_1);
Eterm Term = tp[1];
- Eterm bt = tp[2];
+ Eterm Opts = tp[2];
+ Eterm bt = tp[3];
Binary *bin = erts_magic_ref2bin(bt);
- Eterm res = erts_term_to_binary_int(BIF_P, Term, 0, 0,bin);
+ Eterm res = erts_term_to_binary_int(BIF_P, Term, Opts, 0, 0,bin);
+ if (is_non_value(res)) {
+ if (erts_set_gc_state(BIF_P, 1)
+ || MSO(BIF_P).overhead > BIN_VHEAP_SZ(BIF_P)) {
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ }
+ if (Opts == am_undefined)
+ ERTS_BIF_ERROR_TRAPPED1(BIF_P, SYSTEM_LIMIT,
+ bif_export[BIF_term_to_binary_1], Term);
+ else
+ ERTS_BIF_ERROR_TRAPPED2(BIF_P, SYSTEM_LIMIT,
+ bif_export[BIF_term_to_binary_2], Term, Opts);
+ }
if (is_tuple(res)) {
ASSERT(BIF_P->flags & F_DISABLE_GC);
BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res);
@@ -1136,7 +1286,12 @@ HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 1)
BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
{
- Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS, NULL);
+ Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, am_undefined,
+ 0, TERM_TO_BINARY_DFLAGS, NULL);
+ if (is_non_value(res)) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
if (is_tuple(res)) {
erts_set_gc_state(BIF_P, 0);
BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res);
@@ -1195,7 +1350,12 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
goto error;
}
- res = erts_term_to_binary_int(p, Term, level, flags, NULL);
+ res = erts_term_to_binary_int(p, Term, BIF_ARG_2,
+ level, flags, NULL);
+ if (is_non_value(res)) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
if (is_tuple(res)) {
erts_set_gc_state(p, 0);
BIF_TRAP1(&term_to_binary_trap_export,BIF_P,res);
@@ -1744,8 +1904,17 @@ external_size_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm Term = BIF_ARG_1;
+ Uint size;
+
+ switch (erts_encode_ext_size(Term, &size)) {
+ case ERTS_EXT_SZ_SYSTEM_LIMIT:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ case ERTS_EXT_SZ_YIELD:
+ ERTS_INTERNAL_ERROR("Unexpected yield");
+ case ERTS_EXT_SZ_OK:
+ break;
+ }
- Uint size = erts_encode_ext_size(Term);
if (IS_USMALL(0, size)) {
BIF_RET(make_small(size));
} else {
@@ -1788,7 +1957,15 @@ external_size_2(BIF_ALIST_2)
goto error;
}
- size = erts_encode_ext_size_2(BIF_ARG_1, flags);
+ switch (erts_encode_ext_size_2(BIF_ARG_1, flags, &size)) {
+ case ERTS_EXT_SZ_SYSTEM_LIMIT:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ case ERTS_EXT_SZ_YIELD:
+ ERTS_INTERNAL_ERROR("Unexpected yield");
+ case ERTS_EXT_SZ_OK:
+ break;
+ }
+
if (IS_USMALL(0, size)) {
BIF_RET(make_small(size));
} else {
@@ -1876,7 +2053,15 @@ erts_term_to_binary_simple(Process* p, Eterm Term, Uint size, int level, Uint fl
Eterm
erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
Uint size;
- size = encode_size_struct2(NULL, Term, flags) + 1 /* VERSION_MAGIC */;
+ switch (encode_size_struct_int(NULL, NULL, Term, flags, NULL, &size)) {
+ case ERTS_EXT_SZ_SYSTEM_LIMIT:
+ return THE_NON_VALUE;
+ case ERTS_EXT_SZ_YIELD:
+ ERTS_INTERNAL_ERROR("Unexpected yield");
+ case ERTS_EXT_SZ_OK:
+ break;
+ }
+ size++; /* VERSION_MAGIC */
return erts_term_to_binary_simple(p, Term, size, level, flags);
}
@@ -1926,8 +2111,8 @@ static int ttb_context_destructor(Binary *context_bin)
return 1;
}
-static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
- Binary *context_b)
+static Eterm erts_term_to_binary_int(Process* p, Eterm Term, Eterm opts, int level,
+ Uint flags, Binary *context_b)
{
Eterm *hp;
Eterm res;
@@ -1945,18 +2130,17 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
do { \
if (context_b == NULL) { \
context_b = erts_create_magic_binary(sizeof(TTBContext), \
- ttb_context_destructor); \
+ ttb_context_destructor);\
context = ERTS_MAGIC_BIN_DATA(context_b); \
- sys_memcpy(context,&c_buff,sizeof(TTBContext)); \
+ sys_memcpy(context,&c_buff,sizeof(TTBContext)); \
} \
} while (0)
#define RETURN_STATE() \
do { \
- static const int TUPLE2_SIZE = 2 + 1; \
- hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + TUPLE2_SIZE); \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + 1 + 3); \
c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
- res = TUPLE2(hp, Term, c_term); \
+ res = TUPLE3(hp, Term, opts, c_term); \
BUMP_ALL_REDS(p); \
return res; \
} while (0);
@@ -1982,11 +2166,17 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
int level;
Uint flags;
/* Try for fast path */
- if (encode_size_struct_int(&context->s.sc, NULL, Term,
- context->s.sc.flags, &reds, &size) < 0) {
+ switch (encode_size_struct_int(&context->s.sc, NULL, Term,
+ context->s.sc.flags, &reds, &size)) {
+ case ERTS_EXT_SZ_SYSTEM_LIMIT:
+ BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR);
+ return THE_NON_VALUE;
+ case ERTS_EXT_SZ_YIELD:
EXPORT_CONTEXT();
/* Same state */
RETURN_STATE();
+ case ERTS_EXT_SZ_OK:
+ break;
}
++size; /* VERSION_MAGIC */
/* Move these to next state */
@@ -2348,7 +2538,7 @@ dec_atom(ErtsDistExternal *edep, byte* ep, Eterm* objp)
return ep;
}
-static ERTS_INLINE ErlNode* dec_get_node(Eterm sysname, Uint32 creation)
+static ERTS_INLINE ErlNode* dec_get_node(Eterm sysname, Uint32 creation, Eterm book)
{
if (sysname == INTERNAL_LOCAL_SYSNAME) /* && DFLAG_INTERNAL_TAGS */
return erts_this_node;
@@ -2357,7 +2547,7 @@ static ERTS_INLINE ErlNode* dec_get_node(Eterm sysname, Uint32 creation)
&& (creation == erts_this_node->creation || creation == ORIG_CREATION))
return erts_this_node;
- return erts_find_or_insert_node(sysname,creation);
+ return erts_find_or_insert_node(sysname,creation,book);
}
static byte*
@@ -2403,7 +2593,7 @@ dec_pid(ErtsDistExternal *edep, ErtsHeapFactory* factory, byte* ep,
* We are careful to create the node entry only after all
* validity tests are done.
*/
- node = dec_get_node(sysname, cre);
+ node = dec_get_node(sysname, cre, make_boxed(factory->hp));
if(node == erts_this_node) {
*objp = make_internal_pid(data);
@@ -3397,7 +3587,7 @@ dec_term_atom_common:
cre = get_int32(ep);
ep += 4;
}
- node = dec_get_node(sysname, cre);
+ node = dec_get_node(sysname, cre, make_boxed(hp));
if(node == erts_this_node) {
*objp = make_internal_port(num);
}
@@ -3468,7 +3658,7 @@ dec_term_atom_common:
cre = get_int32(ep);
ep += 4;
- r0 = get_int32(ep); /* allow full word */
+ r0 = get_int32(ep);
ep += 4;
ref_ext_common: {
@@ -3477,8 +3667,15 @@ dec_term_atom_common:
if (ref_words > ERTS_MAX_REF_NUMBERS)
goto error;
- node = dec_get_node(sysname, cre);
+ node = dec_get_node(sysname, cre, make_boxed(hp));
if(node == erts_this_node) {
+ if (r0 >= MAX_REFERENCE) {
+ /*
+ * Must reject local refs with more than 18 bits
+ * in first word as magic ref table relies on it.
+ */
+ goto error;
+ }
rtp = (ErtsORefThing *) hp;
ref_num = &rtp->num[0];
@@ -4041,13 +4238,21 @@ error_hamt:
to a sequence of bytes
N.B. that this must agree with to_external2() above!
(except for cached atoms) */
-static Uint encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags) {
- Uint res;
- (void) encode_size_struct_int(NULL, acmp, obj, dflags, NULL, &res);
- return res;
+static Uint encode_size_struct2(ErtsAtomCacheMap *acmp,
+ Eterm obj,
+ unsigned dflags) {
+ Uint size;
+ ErtsExtSzRes res = encode_size_struct_int(NULL, acmp, obj,
+ dflags, NULL, &size);
+ /*
+ * encode_size_struct2() only allowed when
+ * we know the result will always be OK!
+ */
+ ASSERT(res == ERTS_EXT_SZ_OK); (void) res;
+ return (Uint) size;
}
-static int
+static ErtsExtSzRes
encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
unsigned dflags, Sint *reds, Uint *res)
{
@@ -4080,7 +4285,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
ctx->obj = obj;
ctx->result = result;
WSTACK_SAVE(s, &ctx->wstack);
- return -1;
+ return ERTS_EXT_SZ_YIELD;
}
switch (tag_val_def(obj)) {
case NIL_DEF:
@@ -4256,11 +4461,26 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
result += 32; /* Yes, including the tag */
}
break;
- case BINARY_DEF:
- if (dflags & DFLAG_INTERNAL_TAGS) {
+ case BINARY_DEF: {
+ ProcBin* pb = (ProcBin*) binary_val(obj);
+ Uint tot_bytes = pb->size;
+ if (!(dflags & DFLAG_INTERNAL_TAGS)) {
+#ifdef ARCH_64
+ if (tot_bytes >= (Uint) 0xffffffff) {
+ if (pb->thing_word == HEADER_SUB_BIN) {
+ ErlSubBin* sub = (ErlSubBin*) pb;
+ tot_bytes += (sub->bitoffs + sub->bitsize + 7) / 8;
+ }
+ if (tot_bytes > (Uint) 0xffffffff) {
+ WSTACK_DESTROY(s);
+ return ERTS_EXT_SZ_SYSTEM_LIMIT;
+ }
+ }
+#endif
+ }
+ else {
ProcBin* pb = (ProcBin*) binary_val(obj);
Uint sub_extra = 0;
- Uint tot_bytes = pb->size;
if (pb->thing_word == HEADER_SUB_BIN) {
ErlSubBin* sub = (ErlSubBin*) pb;
pb = (ProcBin*) binary_val(sub->orig);
@@ -4277,6 +4497,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
result += 1 + 4 + binary_size(obj) +
5; /* For unaligned binary */
break;
+ }
case FUN_DEF:
{
ErlFunThing* funp = (ErlFunThing *) fun_val(obj);
@@ -4309,7 +4530,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
break;
default:
- erts_exit(ERTS_ERROR_EXIT,"Internal data structure error (in encode_size_struct2)%x\n",
+ erts_exit(ERTS_ERROR_EXIT,"Internal data structure error (in encode_size_struct_int) %x\n",
obj);
}
@@ -4349,7 +4570,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
*reds = r < 0 ? 0 : r;
}
*res = result;
- return 0;
+ return ERTS_EXT_SZ_OK;
}
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index edac177cc6..b556c9076c 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -58,6 +58,8 @@
#define SMALL_ATOM_UTF8_EXT 'w'
#define DIST_HEADER 'D'
+#define DIST_FRAG_HEADER 'E'
+#define DIST_FRAG_CONT 'F'
#define ATOM_CACHE_REF 'R'
#define ATOM_INTERNAL_REF2 'I'
#define ATOM_INTERNAL_REF3 'K'
@@ -122,24 +124,26 @@ typedef struct {
#define ERTS_DIST_CON_ID_MASK ((Uint32) 0x00ffffff) /* also in net_kernel.erl */
-typedef struct {
- DistEntry *dep;
+struct binary;
+typedef struct erl_dist_external_data ErtsDistExternalData;
+
+struct erl_dist_external_data {
+ Uint64 seq_id;
+ Uint64 frag_id;
byte *extp;
byte *ext_endp;
+ struct binary *binp;
+};
+
+typedef struct erl_dist_external {
Sint heap_size;
- Uint32 connection_id;
+ DistEntry *dep;
Uint32 flags;
+ Uint32 connection_id;
+ ErtsDistExternalData *data;
ErtsAtomTranslationTable attab;
} ErtsDistExternal;
-#define ERTS_DIST_EXT_SIZE(EDEP) \
- (sizeof(ErtsDistExternal) \
- - (((EDEP)->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) \
- ? (ASSERT(0 <= (EDEP)->attab.size \
- && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \
- sizeof(Eterm)*(ERTS_ATOM_CACHE_SIZE - (EDEP)->attab.size)) \
- : sizeof(ErtsAtomTranslationTable)))
-
typedef struct {
byte *extp;
int exttmp;
@@ -155,36 +159,48 @@ void erts_reset_atom_cache_map(ErtsAtomCacheMap *);
void erts_destroy_atom_cache_map(ErtsAtomCacheMap *);
void erts_finalize_atom_cache_map(ErtsAtomCacheMap *, Uint32);
-Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
-byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *);
+Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *, Uint);
+byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *, Uint, Eterm);
+byte *erts_encode_ext_dist_header_fragment(byte **, Uint, Eterm);
Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf*, DistEntry *, Uint32 dflags, Sint reds);
struct erts_dsig_send_context;
-int erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap*, Uint* szp);
-int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp);
+
+typedef enum {
+ ERTS_EXT_SZ_OK,
+ ERTS_EXT_SZ_YIELD,
+ ERTS_EXT_SZ_SYSTEM_LIMIT
+} ErtsExtSzRes;
+
+ErtsExtSzRes erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap*, Uint* szp);
+ErtsExtSzRes erts_encode_dist_ext_size_ctx(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp);
struct TTBEncodeContext_;
int erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *,
struct TTBEncodeContext_ *, Sint* reds);
-Uint erts_encode_ext_size(Eterm);
-Uint erts_encode_ext_size_2(Eterm, unsigned);
+ErtsExtSzRes erts_encode_ext_size(Eterm, Uint *szp);
+ErtsExtSzRes erts_encode_ext_size_2(Eterm, unsigned, Uint *szp);
Uint erts_encode_ext_size_ets(Eterm);
void erts_encode_ext(Eterm, byte **);
byte* erts_encode_ext_ets(Eterm, byte *, struct erl_off_heap_header** ext_off_heap);
-ERTS_GLB_INLINE void erts_free_dist_ext_copy(ErtsDistExternal *);
-ERTS_GLB_INLINE void *erts_dist_ext_trailer(ErtsDistExternal *);
-ErtsDistExternal *erts_make_dist_ext_copy(ErtsDistExternal *, Uint);
-void *erts_dist_ext_trailer(ErtsDistExternal *);
-void erts_destroy_dist_ext_copy(ErtsDistExternal *);
-
-#define ERTS_PREP_DIST_EXT_FAILED (-1)
-#define ERTS_PREP_DIST_EXT_SUCCESS (0)
-#define ERTS_PREP_DIST_EXT_CLOSED (1)
-
-int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint,
- DistEntry *, Uint32 conn_id, ErtsAtomCache *);
-Sint erts_decode_dist_ext_size(ErtsDistExternal *);
-Eterm erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *);
+Uint erts_dist_ext_size(ErtsDistExternal *);
+Uint erts_dist_ext_data_size(ErtsDistExternal *);
+void erts_free_dist_ext_copy(ErtsDistExternal *);
+void erts_make_dist_ext_copy(ErtsDistExternal *, ErtsDistExternal *);
+void erts_dist_ext_frag(ErtsDistExternalData *, ErtsDistExternal *);
+#define erts_get_dist_ext(HFRAG) ((ErtsDistExternal*)((HFRAG)->mem + (HFRAG)->used_size))
+
+typedef enum {
+ ERTS_PREP_DIST_EXT_FAILED,
+ ERTS_PREP_DIST_EXT_SUCCESS,
+ ERTS_PREP_DIST_EXT_FRAG_CONT,
+ ERTS_PREP_DIST_EXT_CLOSED
+} ErtsPrepDistExtRes;
+
+ErtsPrepDistExtRes erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint, struct binary *,
+ DistEntry *, Uint32, ErtsAtomCache *);
+Sint erts_decode_dist_ext_size(ErtsDistExternal *, int);
+Eterm erts_decode_dist_ext(ErtsHeapFactory*, ErtsDistExternal *, int);
Sint erts_decode_ext_size(byte*, Uint);
Sint erts_decode_ext_size_ets(byte*, Uint);
@@ -200,25 +216,4 @@ int erts_debug_max_atom_out_cache_index(void);
int erts_debug_atom_to_out_cache_index(Eterm);
void transcode_free_ctx(DistEntry* dep);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void
-erts_free_dist_ext_copy(ErtsDistExternal *edep)
-{
- if (edep->dep)
- erts_deref_dist_entry(edep->dep);
- erts_free(ERTS_ALC_T_EXT_TERM_DATA, edep);
-}
-
-ERTS_GLB_INLINE void *
-erts_dist_ext_trailer(ErtsDistExternal *edep)
-{
- void *res = (void *) (edep->ext_endp
- + ERTS_EXTRA_DATA_ALIGN_SZ(edep->ext_endp));
- ASSERT((((UWord) res) % sizeof(Uint)) == 0);
- return res;
-}
-
-#endif
-
#endif /* ERL_EXTERNAL_H__ */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index f564472081..4c8d3d3dbe 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -134,6 +134,7 @@ extern Eterm erts_nif_call_function(Process *p, Process *tracee,
int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p,
BeamInstr *I, Eterm *reg);
+ErtsMessage* erts_create_message_from_nif_env(ErlNifEnv* msg_env);
/* Driver handle (wrapper for old plain handle) */
@@ -295,7 +296,6 @@ union erl_off_heap_ptr {
/* controls warning mapping in error_logger */
extern Eterm node_cookie;
-extern Uint display_items; /* no of items to display in traces etc */
extern int erts_backtrace_depth;
extern erts_atomic32_t erts_max_gen_gcs;
@@ -895,6 +895,11 @@ void erts_init_bif(void);
Eterm erl_send(Process *p, Eterm to, Eterm msg);
int erts_set_group_leader(Process *proc, Eterm new_gl);
+/* erl_bif_guard.c */
+
+void erts_init_bif_guard(void);
+Eterm erts_trapping_length_1(Process* p, Eterm* args);
+
/* erl_bif_op.c */
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
@@ -1091,7 +1096,7 @@ extern int distribution_info(fmtfn_t, void *);
extern int is_node_name_atom(Eterm a);
extern int erts_net_message(Port *, DistEntry *, Uint32 conn_id,
- byte *, ErlDrvSizeT, byte *, ErlDrvSizeT);
+ byte *, ErlDrvSizeT, Binary *, byte *, ErlDrvSizeT);
extern void init_dist(void);
extern int stop_dist(void);
@@ -1211,10 +1216,11 @@ Uint64 erts_timestamp_millis(void);
Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
-void *erts_calc_stacklimit(char *prev_c, UWord stacksize);
-int erts_check_below_limit(char *ptr, char *limit);
-int erts_check_above_limit(char *ptr, char *limit);
-void *erts_ptr_id(void *ptr);
+/* ERTS_NOINLINE prevents link-time optimization across modules */
+void *erts_calc_stacklimit(char *prev_c, UWord stacksize) ERTS_NOINLINE;
+int erts_check_below_limit(char *ptr, char *limit) ERTS_NOINLINE;
+int erts_check_above_limit(char *ptr, char *limit) ERTS_NOINLINE;
+void *erts_ptr_id(void *ptr) ERTS_NOINLINE;
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab
index 42c1168f85..7cffe7fb5c 100644
--- a/erts/emulator/beam/instrs.tab
+++ b/erts/emulator/beam/instrs.tab
@@ -238,6 +238,7 @@ HANDLE_APPLY_FUN_ERROR() {
}
DISPATCH_FUN(I) {
+ //| -no_next
SET_I($I);
Dispatchfun();
}
@@ -299,6 +300,7 @@ i_call_fun_last(Fun, Deallocate) {
}
return() {
+ //| -no_next
SET_I(c_p->cp);
DTRACE_RETURN_FROM_PC(c_p);
@@ -357,7 +359,7 @@ i_get_tuple_element2(Src, Element, Dst) {
dst[1] = E2;
}
-i_get_tuple_element2y(Src, Element, D1, D2) {
+i_get_tuple_element2_dst(Src, Element, D1, D2) {
Eterm* src;
Eterm E1, E2;
src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
@@ -434,6 +436,30 @@ init(Y) {
make_blank($Y);
}
+init_seq3(Y1) {
+ Eterm* dst = &$Y1;
+ make_blank(dst[0]);
+ make_blank(dst[1]);
+ make_blank(dst[2]);
+}
+
+init_seq4(Y1) {
+ Eterm* dst = &$Y1;
+ make_blank(dst[0]);
+ make_blank(dst[1]);
+ make_blank(dst[2]);
+ make_blank(dst[3]);
+}
+
+init_seq5(Y1) {
+ Eterm* dst = &$Y1;
+ make_blank(dst[0]);
+ make_blank(dst[1]);
+ make_blank(dst[2]);
+ make_blank(dst[3]);
+ make_blank(dst[4]);
+}
+
init2(Y1, Y2) {
make_blank($Y1);
make_blank($Y2);
@@ -451,6 +477,13 @@ i_make_fun(FunP, NumFree) {
HEAVY_SWAPIN;
}
+move_trim(Src, Dst, Words) {
+ Uint cp = E[0];
+ $Dst = $Src;
+ E += $Words;
+ E[0] = cp;
+}
+
i_trim(Words) {
Uint cp = E[0];
E += $Words;
@@ -467,10 +500,6 @@ move3(S1, D1, S2, D2, S3, D3) {
$D3 = $S3;
}
-move_dup(Src, D1, D2) {
- $D1 = $D2 = $Src;
-}
-
move2_par(S1, D1, S2, D2) {
Eterm V1, V2;
V1 = $S1;
@@ -486,6 +515,48 @@ move_shift(Src, SD, D) {
$SD = V;
}
+move_src_window2(Src, D1, D2) {
+ Eterm* src = &$Src;
+ Eterm s1, s2;
+ s1 = src[0];
+ s2 = src[1];
+ $D1 = s1;
+ $D2 = s2;
+}
+
+move_src_window3(Src, D1, D2, D3) {
+ Eterm* src = &$Src;
+ Eterm s1, s2, s3;
+ s1 = src[0];
+ s2 = src[1];
+ s3 = src[2];
+ $D1 = s1;
+ $D2 = s2;
+ $D3 = s3;
+}
+
+move_src_window4(Src, D1, D2, D3, D4) {
+ Eterm* src = &$Src;
+ Eterm s1, s2, s3, s4;
+ s1 = src[0];
+ s2 = src[1];
+ s3 = src[2];
+ s4 = src[3];
+ $D1 = s1;
+ $D2 = s2;
+ $D3 = s3;
+ $D4 = s4;
+}
+
+move_window2(S1, S2, D) {
+ Eterm xt0, xt1;
+ Eterm* y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ y[0] = xt0;
+ y[1] = xt1;
+}
+
move_window3(S1, S2, S3, D) {
Eterm xt0, xt1, xt2;
Eterm* y = &$D;
@@ -559,17 +630,19 @@ update_list(Hd, Dst) {
HTOP += 2;
}
-i_put_tuple := i_put_tuple.make.fill;
-
-i_put_tuple.make(Dst) {
- $Dst = make_tuple(HTOP);
-}
-
-i_put_tuple.fill(Arity) {
+put_tuple2(Dst, Arity) {
Eterm* hp = HTOP;
Eterm arity = $Arity;
+ /*
+ * If operands are not packed (in the 32-bit VM),
+ * it is not safe to use $Dst directly after I
+ * has been updated.
+ */
+ Eterm* dst_ptr = &($Dst);
+
//| -no_next
+ ASSERT(arity != 0);
*hp++ = make_arityval(arity);
I = $NEXT_INSTRUCTION;
do {
@@ -586,6 +659,7 @@ i_put_tuple.fill(Arity) {
break;
}
} while (--arity != 0);
+ *dst_ptr = make_tuple(HTOP);
HTOP = hp;
ASSERT(VALID_INSTR(* (Eterm *)I));
Goto(*I);
@@ -637,12 +711,6 @@ is_nonempty_list(Fail, Src) {
}
}
-is_nonempty_list_test_heap(Fail, Need, Live) {
- //| -no_prefetch
- $is_nonempty_list($Fail, x(0));
- $test_heap($Need, $Live);
-}
-
is_nonempty_list_allocate(Fail, Src, Need, Live) {
//| -no_prefetch
$is_nonempty_list($Fail, $Src);
@@ -655,13 +723,27 @@ is_nonempty_list_get_list(Fail, Src, Hd, Tl) {
$get_list($Src, $Hd, $Tl);
}
+is_nonempty_list_get_hd(Fail, Src, Hd) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, $Src);
+ $get_hd($Src, $Hd);
+}
+
+is_nonempty_list_get_tl(Fail, Src, Tl) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, $Src);
+ $get_tl($Src, $Tl);
+}
+
jump(Fail) {
$JUMP($Fail);
}
-move_jump(Fail, Src) {
- x(0) = $Src;
- $jump($Fail);
+move_jump(Lbl, Src, Dst) {
+ Eterm lbl = $Lbl;
+ Eterm src = $Src;
+ $Dst = src;
+ $JUMP(lbl);
}
//
@@ -704,12 +786,18 @@ is_function(Fail, Src) {
}
}
-is_function2(Fail, Fun, Arity) {
+cold_is_function2(Fail, Fun, Arity) {
if (erl_is_function(c_p, $Fun, $Arity) != am_true ) {
$FAIL($Fail);
}
}
+hot_is_function2(Fail, Fun, Arity) {
+ if (!is_function2($Fun, $Arity)) {
+ $FAIL($Fail);
+ }
+}
+
is_integer(Fail, Src) {
if (is_not_integer($Src)) {
$FAIL($Fail);
@@ -767,6 +855,16 @@ is_tagged_tuple(Fail, Src, Arityval, Tag) {
}
}
+is_tagged_tuple_ff(NotTupleFail, WrongRecordFail, Src, Arityval, Tag) {
+ Eterm term = $Src;
+ if (is_not_tuple(term)) {
+ $FAIL($NotTupleFail);
+ } else if (ERTS_UNLIKELY((tuple_val(term))[0] != $Arityval ||
+ (tuple_val(term))[1] != $Tag)) {
+ $FAIL($WrongRecordFail);
+ }
+}
+
is_tuple(Fail, Src) {
if (is_not_tuple($Src)) {
$FAIL($Fail);
@@ -786,6 +884,16 @@ test_arity(Fail, Pointer, Arity) {
}
}
+test_arity_get_tuple_element(Fail, Pointer, Arity, Pos, Dst) {
+ Eterm* ptr = tuple_val($Pointer);
+ Eterm* src;
+ if (*ptr != $Arity) {
+ $FAIL($Fail);
+ }
+ src = ADD_BYTE_OFFSET(ptr, $Pos);
+ $Dst = *src;
+}
+
i_is_eq_exact_immed(Fail, X, Y) {
if ($X != $Y) {
$FAIL($Fail);
@@ -824,12 +932,16 @@ i_is_ne_exact_literal(Fail, Src, Literal) {
}
}
-is_eq(Fail, X, Y) {
- CMP_EQ_ACTION($X, $Y, $FAIL($Fail));
+is_eq(Fail, A, B) {
+ Eterm a = $A;
+ Eterm b = $B;
+ CMP_EQ_ACTION(a, b, $FAIL($Fail));
}
-is_ne(Fail, X, Y) {
- CMP_NE_ACTION($X, $Y, $FAIL($Fail));
+is_ne(Fail, A, B) {
+ Eterm a = $A;
+ Eterm b = $B;
+ CMP_NE_ACTION(a, b, $FAIL($Fail));
}
is_lt(Fail, X, Y) {
@@ -889,7 +1001,7 @@ catch_end(Y) {
}
/* only x(2) is included in the rootset here */
if (E - HTOP < 3) {
- SWAPOUT;
+ $GC_SWAPOUT();
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -948,22 +1060,34 @@ build_stacktrace() {
}
raw_raise() {
+ //| -no_prefetch
Eterm class = x(0);
Eterm value = x(1);
Eterm stacktrace = x(2);
+ Eterm* freason_ptr;
+
+ /*
+ * Note that the i_raise instruction will override c_p->freason
+ * with the freason field stored inside the StackTrace struct in
+ * ftrace. Therefore, we must take care to store the class both
+ * inside the StackTrace struct and in c_p->freason (important if
+ * the class is different from the class of the original
+ * exception).
+ */
+ freason_ptr = get_freason_ptr_from_exc(stacktrace);
if (class == am_error) {
- c_p->freason = EXC_ERROR & ~EXF_SAVETRACE;
+ *freason_ptr = c_p->freason = EXC_ERROR & ~EXF_SAVETRACE;
c_p->fvalue = value;
c_p->ftrace = stacktrace;
goto find_func_info;
} else if (class == am_exit) {
- c_p->freason = EXC_EXIT & ~EXF_SAVETRACE;
+ *freason_ptr = c_p->freason = EXC_EXIT & ~EXF_SAVETRACE;
c_p->fvalue = value;
c_p->ftrace = stacktrace;
goto find_func_info;
} else if (class == am_throw) {
- c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
+ *freason_ptr = c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
c_p->fvalue = value;
c_p->ftrace = stacktrace;
goto find_func_info;
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 7322239a73..45fef0c0e5 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -3644,20 +3644,22 @@ typedef struct {
Eterm reason;
} ErtsPortExitContext;
-static void link_port_exit(ErtsLink *lnk, void *vpectxt)
+static int link_port_exit(ErtsLink *lnk, void *vpectxt, Sint reds)
{
ErtsPortExitContext *pectxt = vpectxt;
erts_proc_sig_send_link_exit(NULL, pectxt->port_id,
lnk, pectxt->reason, NIL);
+ return 1;
}
-static void monitor_port_exit(ErtsMonitor *mon, void *vpectxt)
+static int monitor_port_exit(ErtsMonitor *mon, void *vpectxt, Sint reds)
{
ErtsPortExitContext *pectxt = vpectxt;
if (erts_monitor_is_target(mon))
erts_proc_sig_send_monitor_down(mon, pectxt->reason);
else
erts_proc_sig_send_demonitor(mon);
+ return 1;
}
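
The link and monitor iteration callbacks now take a reduction budget and
return an int, presumably the number of reductions consumed, so that
large link/monitor trees can be walked incrementally. A sketch of a
conforming callback; count_one() and its reduction accounting are
illustrative assumptions, not part of this patch:

    static int count_one(ErtsMonitor *mon, void *vcountp, Sint reds)
    {
        Uint *countp = (Uint *) vcountp;
        (void) mon;
        (void) reds; /* remaining budget; unused by this cheap callback */
        (*countp)++;
        return 1;    /* reductions consumed by this call */
    }
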
/* 'from' is sending 'this_port' an exit signal, (this_port must be internal).
@@ -4448,6 +4450,7 @@ erts_port_call(Process* c_p,
char input_buf[256];
char *bufp;
byte *endp;
+ Uint uintsz;
ErlDrvSizeT size;
int try_call;
erts_aint32_t sched_flags;
@@ -4460,7 +4463,9 @@ erts_port_call(Process* c_p,
try_call = !(sched_flags & ERTS_PTS_FLGS_FORCE_SCHEDULE_OP);
- size = erts_encode_ext_size(data);
+ if (erts_encode_ext_size(data, &uintsz) != ERTS_EXT_SZ_OK)
+ return ERTS_PORT_OP_BADARG;
+ size = (ErlDrvSizeT) uintsz;
if (!try_call)
bufp = erts_alloc(ERTS_ALC_T_DRV_CALL_DATA, size);
@@ -4836,7 +4841,7 @@ typedef struct {
void *arg;
} prt_one_lnk_data;
-static void prt_one_monitor(ErtsMonitor *mon, void *vprtd)
+static int prt_one_monitor(ErtsMonitor *mon, void *vprtd, Sint reds)
{
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
@@ -4844,12 +4849,14 @@ static void prt_one_monitor(ErtsMonitor *mon, void *vprtd)
erts_print(prtd->to, prtd->arg, "(%p,%T)", mon->other.ptr, mdp->ref);
else
erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->other.item, mdp->ref);
+ return 1;
}
-static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
+static int prt_one_lnk(ErtsLink *lnk, void *vprtd, Sint reds)
{
prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
erts_print(prtd->to, prtd->arg, "%T", lnk->other.item);
+ return 1;
}
static void dump_port_state(fmtfn_t to, void *arg, erts_aint32_t state)
@@ -5100,7 +5107,7 @@ erts_port_resume_procs(Port *prt)
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", prt->common.id);
while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->pid);
+ erts_snprintf(pid_str, sizeof(DTRACE_CHARBUF_NAME(pid_str)), "%T", plp2->u.pid);
DTRACE2(process_port_unblocked, pid_str, port_str);
}
}
@@ -5291,44 +5298,31 @@ erts_get_port_names(Eterm id, ErlDrvPort drv_port)
pnp->driver_name = NULL;
}
else {
- int do_realloc = 1;
- int len = -1;
- size_t pnp_len = sizeof(ErtsPortNames);
-#ifndef DEBUG
- pnp_len += 100; /* In most cases 100 characters will be enough... */
- ASSERT(prt->common.id == id);
-#endif
- pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len);
- do {
- int nlen;
- char *name, *driver_name;
- if (len > 0) {
- erts_free(ERTS_ALC_T_PORT_NAMES, pnp);
- pnp_len = sizeof(ErtsPortNames) + len;
- pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len);
- }
- name = prt->name;
- len = nlen = name ? sys_strlen(name) + 1 : 0;
- driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL);
- len += driver_name ? sys_strlen(driver_name) + 1 : 0;
- if (len <= pnp_len - sizeof(ErtsPortNames)) {
- if (!name)
- pnp->name = NULL;
- else {
- pnp->name = ((char *) pnp) + sizeof(ErtsPortNames);
- sys_strcpy(pnp->name, name);
- }
- if (!driver_name)
- pnp->driver_name = NULL;
- else {
- pnp->driver_name = (((char *) pnp)
- + sizeof(ErtsPortNames)
- + nlen);
- sys_strcpy(pnp->driver_name, driver_name);
- }
- do_realloc = 0;
- }
- } while (do_realloc);
+ int len;
+ int nlen;
+ char *driver_name;
+
+ len = nlen = prt->name ? sys_strlen(prt->name) + 1 : 0;
+ driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL);
+ len += driver_name ? sys_strlen(driver_name) + 1 : 0;
+
+ pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES,
+ sizeof(ErtsPortNames) + len);
+
+ if (!prt->name)
+ pnp->name = NULL;
+ else {
+ pnp->name = ((char *) pnp) + sizeof(ErtsPortNames);
+ sys_strcpy(pnp->name, prt->name);
+ }
+ if (!driver_name)
+ pnp->driver_name = NULL;
+ else {
+ pnp->driver_name = (((char *) pnp)
+ + sizeof(ErtsPortNames)
+ + nlen);
+ sys_strcpy(pnp->driver_name, driver_name);
+ }
}
return pnp;
}
@@ -6177,6 +6171,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
dep,
conn_id,
(byte*) hbuf, hlen,
+ ErlDrvBinary2Binary(bin),
(byte*) (bin->orig_bytes+offs), len);
}
else
@@ -6222,12 +6217,14 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
dep,
conn_id,
NULL, 0,
+ NULL,
(byte*) hbuf, hlen);
else
return erts_net_message(prt,
dep,
conn_id,
(byte*) hbuf, hlen,
+ NULL,
(byte*) buf, len);
}
else if (state & ERTS_PORT_SFLG_LINEBUF_IO)
diff --git a/erts/emulator/beam/macros.tab b/erts/emulator/beam/macros.tab
index 494fe8961e..1b5e5f66b0 100644
--- a/erts/emulator/beam/macros.tab
+++ b/erts/emulator/beam/macros.tab
@@ -63,10 +63,21 @@ JUMP(Fail) {
Goto(*I);
}
+GC_SWAPOUT() {
+ //
+ // Since a garbage collection is expensive anyway, we can afford
+ // to save the instruction counter so that the correct function will
+ // be pointed out in the crash dump if the garbage collection fails
+ // because of insufficient memory.
+ //
+ SWAPOUT;
+ c_p->i = I;
+}
+
GC_TEST(Ns, Nh, Live) {
Uint need = $Nh + $Ns;
if (ERTS_UNLIKELY(E - HTOP < need)) {
- SWAPOUT;
+ $GC_SWAPOUT();
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
@@ -79,7 +90,7 @@ GC_TEST(Ns, Nh, Live) {
GC_TEST_PRESERVE(NeedHeap, Live, PreserveTerm) {
Uint need = $NeedHeap;
if (ERTS_UNLIKELY(E - HTOP < need)) {
- SWAPOUT;
+ $GC_SWAPOUT();
reg[$Live] = $PreserveTerm;
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live+1, FCALLS);
diff --git a/erts/emulator/beam/map_instrs.tab b/erts/emulator/beam/map_instrs.tab
index c594a87298..5620d5a2d1 100644
--- a/erts/emulator/beam/map_instrs.tab
+++ b/erts/emulator/beam/map_instrs.tab
@@ -127,11 +127,21 @@ i_get_map_elements(Fail, Src, N) {
}
}
-update_map_assoc(Src, Dst, Live, N) {
+update_map_assoc := update_map_assoc.fetch.execute;
+
+update_map_assoc.head() {
+ Eterm map;
+}
+
+update_map_assoc.fetch(Src) {
+ map = $Src;
+}
+
+update_map_assoc.execute(Dst, Live, N) {
Eterm res;
Uint live = $Live;
- reg[live] = $Src;
+ reg[live] = map;
HEAVY_SWAPOUT;
res = erts_gc_update_map_assoc(c_p, reg, live, $N, $NEXT_INSTRUCTION);
HEAVY_SWAPIN;
@@ -141,11 +151,21 @@ update_map_assoc(Src, Dst, Live, N) {
$NEXT($NEXT_INSTRUCTION+$N);
}
-update_map_exact(Fail, Src, Dst, Live, N) {
+update_map_exact := update_map_exact.fetch.execute;
+
+update_map_exact.head() {
+ Eterm map;
+}
+
+update_map_exact.fetch(Src) {
+ map = $Src;
+}
+
+update_map_exact.execute(Fail, Dst, Live, N) {
Eterm res;
Uint live = $Live;
- reg[live] = $Src;
+ reg[live] = map;
HEAVY_SWAPOUT;
res = erts_gc_update_map_exact(c_p, reg, live, $N, $NEXT_INSTRUCTION);
HEAVY_SWAPIN;
diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab
index a6a0dec451..b08466c830 100644
--- a/erts/emulator/beam/msg_instrs.tab
+++ b/erts/emulator/beam/msg_instrs.tab
@@ -138,8 +138,8 @@ i_loop_rec(Dest) {
if (ERTS_UNLIKELY(ERTS_SIG_IS_EXTERNAL_MSG(msgp))) {
FCALLS -= 10; /* FIXME: bump appropriate amount... */
- SWAPOUT; /* erts_decode_dist_message() may write to heap... */
- if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) {
+ SWAPOUT; /* erts_proc_sig_decode_dist() may write to heap... */
+ if (!erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) {
/*
* A corrupt distribution message that we weren't able to decode;
* remove it...
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index e76d896ffc..e9107933f9 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2018. All Rights Reserved.
+# Copyright Ericsson AB 1997-2019. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -74,23 +74,19 @@ trace_jump W
return
+# To ensure that a "move Src x(0)" instruction can be combined with
+# the following call instruction, we need to make sure that there is
+# no line/1 instruction between the move and the call.
#
-# To ensure that a "move Src x(0)" instruction can be combined
-# with the following call instruction, we need to make sure that
-# there is no line/1 instruction between the move and the call.
-#
-# A tail-recursive call to an external function (non-BIF) will
-# never be saved on the stack, so there is no reason to keep
-# the line instruction. (The compiler did not remove the line
-# instruction because it cannot tell the difference between
-# BIFs and ordinary Erlang functions.)
-#
+# A tail-recursive call to an external function (BIF or non-BIF) will
+# never be saved on the stack, so there is no reason to keep the line
+# instruction.
move S X0=x==0 | line Loc | call_ext Ar Func => \
line Loc | move S X0 | call_ext Ar Func
-move S X0=x==0 | line Loc | call_ext_last Ar Func=u$is_not_bif D => \
+move S X0=x==0 | line Loc | call_ext_last Ar Func D => \
move S X0 | call_ext_last Ar Func D
-move S X0=x==0 | line Loc | call_ext_only Ar Func=u$is_not_bif => \
+move S X0=x==0 | line Loc | call_ext_only Ar Func => \
move S X0 | call_ext_only Ar Func
move S X0=x==0 | line Loc | call Ar Func => \
line Loc | move S X0 | call Ar Func
@@ -102,15 +98,18 @@ line I
allocate t t?
allocate_heap t I t?
-%cold
+# This instruction is used when a BIF is called tail-recursively when
+# there is a stack frame.
deallocate Q
-%hot
init y
allocate_zero t t?
allocate_heap_zero t I t?
+move Src=y Dst=x | trim N Remaining => move_trim Src Dst N
trim N Remaining => i_trim N
+
+move_trim y x t
i_trim t
test_heap I t?
@@ -118,11 +117,21 @@ test_heap I t?
allocate_heap S u==0 R => allocate S R
allocate_heap_zero S u==0 R => allocate_zero S R
-init2 y y
-init3 y y y
+init Y1 | init Y2 | init Y3 | succ(Y1,Y2) | succ(Y2,Y3) => init_seq3 Y1
+init_seq3 Y1 | init Y4 | succ3(Y1,Y4) => init_seq4 Y1
+init_seq4 Y1 | init Y5 | succ4(Y1,Y5) => init_seq5 Y1
+
+init_seq3 y
+init_seq4 y
+init_seq5 y
+
init Y1 | init Y2 | init Y3 => init3 Y1 Y2 Y3
init Y1 | init Y2 => init2 Y1 Y2
+init2 y y
+init3 y y y
+
+
# Selecting values
select_val S=aiq Fail=f Size=u Rest=* => const_select_val(S, Fail, Size, Rest)
@@ -205,14 +214,11 @@ set_tuple_element s S P
# Get tuple element
-i_get_tuple_element xy P x
-
-%cold
-i_get_tuple_element xy P y
-%hot
+i_get_tuple_element xy P xy
i_get_tuple_element2 x P x
-i_get_tuple_element2y x P y y
+i_get_tuple_element2_dst x P x x
+i_get_tuple_element2_dst x P y y
i_get_tuple_element3 x P x
@@ -258,12 +264,14 @@ system_limit j
# Move instructions.
#
-move C=cxy x==0 | jump Lbl => move_jump Lbl C
+move Src=cxy Dst=xy | jump Lbl => move_jump Lbl Src Dst
-move_jump f ncxy
+move_jump f cxy xy
+move_jump f c r
-# Movement to and from the stack is common
-# Try to pack as much as we can into one instruction
+
+# Movement to and from the stack is common.
+# Try to pack as much as we can into one instruction.
# Window move
move_window/5
@@ -274,6 +282,9 @@ move_window/6
move X1=x Y1=y | move X2=x Y2=y | move X3=x Y3=y | succ(Y1,Y2) | succ(Y2,Y3) => \
move_window X1 X2 X3 Y1 Y3
+move X1=x Y1=y | move X2=x Y2=y | succ(Y1,Y2) => \
+ move_window2 X1 X2 Y1
+
move_window X1=x X2=x X3=x Y1=y Y3=y | move X4=x Y4=y | succ(Y3,Y4) => \
move_window X1 X2 X3 X4 Y1 Y4
@@ -283,15 +294,54 @@ move_window X1=x X2=x X3=x X4=x Y1=y Y4=y | move X5=x Y5=y | succ(Y4,Y5) => \
move_window X1=x X2=x X3=x Y1=y Y3=y => move_window3 X1 X2 X3 Y1
move_window X1=x X2=x X3=x X4=x Y1=y Y4=y => move_window4 X1 X2 X3 X4 Y1
+move_window2 x x y
move_window3 x x x y
move_window4 x x x x y
move_window5 x x x x x y
+# y -> x
+
+move_src_window/4
+move_src_window/5
+
+move Y1=y X1=x | move Y2=y X2=x | succ(Y1, Y2) => \
+ move_src_window Y1 Y2 X1 X2
+
+move_src_window Y1 Y2 X1 X2 | move Y3=y X3=x | succ(Y2, Y3) => \
+ move_src_window Y1 Y3 X1 X2 X3
+move_src_window Y1 Y2 X1 X2 | move Y3=y X3=x | move Y4=y X4=x | succ(Y3, Y4) => \
+ move_src_window2 Y1 X1 X2 | move_src_window Y3 Y4 X3 X4
+move_src_window Y1 Y2 X1 X2 | move Y3=y X3=x => \
+ move3 Y1 X1 Y2 X2 Y3 X3
+
+move_src_window Y1 Y3 X1 X2 X3 | move Y4=y X4=x | succ(Y3, Y4) => \
+ move_src_window4 Y1 X1 X2 X3 X4
+
+move_src_window Y1 y X1 X2 => move_src_window2 Y1 X1 X2
+move_src_window Y1 y X1 X2 X3 => move_src_window3 Y1 X1 X2 X3
+
+move_src_window2 y x x
+move_src_window3 y x x x
+move_src_window4 y x x x x
+
# Swap registers.
-move R1=x Tmp=x | move R2=xy R1 | move Tmp R2 => swap_temp R1 R2 Tmp
+move R1=xy Tmp=x | move R2=xy R1 | move Tmp R2 => swap_temp R1 R2 Tmp
+
+# The compiler uses x(1022) when swapping registers. It will definitely
+# not be used again.
+swap_temp R1 R2 Tmp=x==1022 => swap R1 R2
+
+swap_temp R1 R2 Tmp | move Src Tmp => swap R1 R2 | move Src Tmp
swap_temp R1 R2 Tmp | line Loc | apply Live | is_killed_apply(Tmp, Live) => \
swap R1 R2 | line Loc | apply Live
+swap_temp R1 R2 Tmp | line Loc | apply_last Live D | is_killed_apply(Tmp, Live) => \
+ swap R1 R2 | line Loc | apply_last Live D
+
+swap_temp R1 R2 Tmp | line Loc | call_fun Live | is_killed_by_call_fun(Tmp, Live) => \
+ swap R1 R2 | line Loc | call_fun Live
+swap_temp R1 R2 Tmp | make_fun2 OldIndex=u | is_killed_by_make_fun(Tmp, OldIndex) => \
+ swap R1 R2 | make_fun2 OldIndex
swap_temp R1 R2 Tmp | line Loc | call Live Addr | is_killed(Tmp, Live) => \
swap R1 R2 | line Loc | call Live Addr
@@ -307,84 +357,112 @@ swap_temp R1 R2 Tmp | line Loc | call_ext_only Live Addr | \
swap_temp R1 R2 Tmp | line Loc | call_ext_last Live Addr D | \
is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_last Live Addr D
-swap_temp x xy x
+swap_temp R1 R2 Tmp | call_ext Live Addr | is_killed(Tmp, Live) => \
+ swap R1 R2 | call_ext Live Addr
+swap_temp R1 R2 Tmp | call_ext_only Live Addr | is_killed(Tmp, Live) => \
+ swap R1 R2 | call_ext_only Live Addr
+swap_temp R1 R2 Tmp | call_ext_last Live Addr D | is_killed(Tmp, Live) => \
+ swap R1 R2 | call_ext_last Live Addr D
+
+swap_temp R1 R2 Tmp | move Src Any | line Loc | call Live Addr | \
+ is_killed(Tmp, Live) | distinct(Tmp, Src) => \
+ swap R1 R2 | move Src Any | line Loc | call Live Addr
+swap_temp R1 R2 Tmp | move Src Any | line Loc | call_ext Live Addr | \
+ is_killed(Tmp, Live) | distinct(Tmp, Src) => \
+ swap R1 R2 | move Src Any | line Loc | call_ext Live Addr
+swap_temp R1 R2 Tmp | move Src Any | call_only Live Addr | \
+ is_killed(Tmp, Live) | distinct(Tmp, Src) => \
+ swap R1 R2 | move Src Any | call_only Live Addr
+swap_temp R1 R2 Tmp | move Src Any | line Loc | call_ext_only Live Addr | \
+ is_killed(Tmp, Live) | distinct(Tmp, Src) => \
+ swap R1 R2 | move Src Any | line Loc | call_ext_only Live Addr
+swap_temp R1 R2 Tmp | move Src Any | line Loc | call_fun Live | \
+ is_killed(Tmp, Live) | distinct(Tmp, Src) => \
+ swap R1 R2 | move Src Any | line Loc | call_fun Live
+
+swap_temp R1 R2 Tmp | line Loc | send | is_killed_by_send(Tmp) => \
+ swap R1 R2 | line Loc | send
+
+# swap_temp/3 instructions with Y register operands are rare.
+swap_temp R1 R2=y Tmp => swap R1 R2 | move R2 Tmp
+swap_temp R1=y R2 Tmp => swap R1 R2 | move R2 Tmp
+
+swap R1=x R2=y => swap R2 R1
+
+swap_temp x x x
+
+swap xy x
+swap y y
+
+# move_shift
+
+move SD=x D=x | move Src=cxy SD=x | distinct(D, Src) => move_shift Src SD D
+move SD=y D=x | move Src=x SD=y | distinct(D, Src) => move_shift Src SD D
+move SD=y D=x | init SD => move_shift n SD D
+move SD=x D=y | move Src=x SD=x | distinct(D, Src) => move_shift Src SD D
+move SD=x==0 D=y | move Src=y SD=x==0 | distinct(D, Src) => move_shift Src SD D
+
+move_shift cxy x x
+move_shift nx y x
+move_shift x x y
+move_shift y r y
+
+# move2_par x x x x
-swap x xy
+move X1=x X2=x | move X3=x X4=x | independent_moves(X1, X2, X3, X4) => \
+ move2_par X1 X2 X3 X4
+move2_par x x x x
-move Src=x D1=x | move Src=x D2=x => move_dup Src D1 D2
-move Src=x SD=x | move SD=x D=x => move_dup Src SD D
-move Src=x D1=x | move Src=x D2=y => move_dup Src D1 D2
-move Src=y SD=x | move SD=x D=y => move_dup Src SD D
-move Src=x SD=x | move SD=x D=y => move_dup Src SD D
-move Src=y SD=x | move SD=x D=x => move_dup Src SD D
+# move2_par x x x y
-move SD=x D=x | move Src=xy SD=x => move_shift Src SD D
-move SD=y D=x | move Src=x SD=y => move_shift Src SD D
-move SD=x D=y | move Src=x SD=x => move_shift Src SD D
+move X1=x X2=x | move X3=x Y1=y | independent_moves(X1, X2, X3, Y1) => \
+ move2_par X1 X2 X3 Y1
+move X3=x Y1=y | move X1=x X2=x | independent_moves(X3, Y1, X1, X2) => \
+ move2_par X1 X2 X3 Y1
+move2_par x x x y
-# The transformations above guarantee that the source for
-# the second move is not the same as the destination for
-# the first move. That means that we can do the moves in
-# parallel (fetch both values, then store them) which could
-# be faster.
+# move2_par y x y x
-move X1=x Y1=y | move X2=x Y2=y => move2_par X1 Y1 X2 Y2
move Y1=y X1=x | move Y2=y X2=x => move2_par Y1 X1 Y2 X2
+move2_par y x y x
-move X1=x X2=x | move X3=x X4=x => move2_par X1 X2 X3 X4
+# move2_par y x x y
-move X1=x X2=x | move X3=x Y1=y => move2_par X1 X2 X3 Y1
+move S1=y S2=x | move X1=x Y1=y | independent_moves(S1, S2, X1, Y1) => \
+ move2_par S1 S2 X1 Y1
+move X1=x Y1=y | move S1=y S2=x | independent_moves(S1, S2, X1, Y1) => \
+ move2_par S1 S2 X1 Y1
+move2_par y x x y
-move S1=x S2=x | move X1=x Y1=y => move2_par S1 S2 X1 Y1
+# move2_par y x x x
-move S1=y S2=x | move X1=x Y1=y => move2_par S1 S2 X1 Y1
+move Y1=y X1=x | move S1=x D1=x | independent_moves(Y1, X1, S1, D1) => \
+ move2_par Y1 X1 S1 D1
+move S1=x D1=x | move Y1=y X1=x | independent_moves(Y1, X1, S1, D1) => \
+ move2_par Y1 X1 S1 D1
+move2_par y x x x
-move Y1=y X1=x | move S1=x D1=x => move2_par Y1 X1 S1 D1
-move S1=x D1=x | move Y1=y X1=x => move2_par S1 D1 Y1 X1
+# move3
-move2_par X1=x Y1=y X2=x Y2=y | move X3=x Y3=y => move3 X1 Y1 X2 Y2 X3 Y3
move2_par Y1=y X1=x Y2=y X2=x | move Y3=y X3=x => move3 Y1 X1 Y2 X2 Y3 X3
move2_par X1=x X2=x X3=x X4=x | move X5=x X6=x => move3 X1 X2 X3 X4 X5 X6
+move3 y x y x y x
+move3 x x x x x x
+
+# move_x1, move_x2
+
move C=aiq X=x==1 => move_x1 C
move C=aiq X=x==2 => move_x2 C
+move n D=y => init D
+
move_x1 c
move_x2 c
-move_shift x x x
-move_shift y x x
-move_shift x y x
-move_shift x x y
-
-move_dup xy x xy
-
-move2_par x y x y
-move2_par y x y x
-move2_par x x x x
-
-move2_par x x x y
-
-move2_par y x x y
-
-move2_par x x y x
-move2_par y x x x
-
-move3 x y x y x y
-move3 y x y x y x
-move3 x x x x x x
-
-# The compiler almost never generates a "move Literal y(Y)" instruction,
-# so let's cheat if we encounter one.
-move S=n D=y => init D
-move S=c D=y => move S x | move x D
-
-move x x
-move x y
-move y x
-move c x
+move xy xy
+move c xy
move n x
-move y y
# The following move instructions using x(0) are frequently used.
@@ -478,14 +556,25 @@ is_ge f? c x
is_ge f? s s
%hot
-is_eq f? s s
+is_eq Fail=f Const=c Reg=xy => is_eq Fail Reg Const
+is_eq Fail=f C1=c C2=c => move C1 x | is_eq Fail x C2
+is_eq f? S s
-is_ne f? s s
+is_ne Fail=f Const=c Reg=xy => is_ne Fail Reg Const
+is_ne Fail=f C1=c C2=c => move C1 x | is_ne Fail x C2
+is_ne f? S s
#
-# Putting things.
+# Putting tuples.
+#
+# Code compiled with OTP 22 and later uses put_tuple2 to
+# construct a tuple.
+#
+# Code compiled before OTP 22 uses put_tuple + one put instruction
+# per element. Translate to put_tuple2.
#
+i_put_tuple/2
put_tuple Arity Dst => i_put_tuple Dst u
i_put_tuple Dst Arity Puts=* | put S1 | put S2 | \
@@ -495,11 +584,13 @@ i_put_tuple Dst Arity Puts=* | put S1 | put S2 | \
i_put_tuple Dst Arity Puts=* | put S => \
tuple_append_put(Arity, Dst, Puts, S)
-i_put_tuple/2
+i_put_tuple Dst Arity Puts=* => put_tuple2 Dst Arity Puts
-i_put_tuple xy I
+put_tuple2 xy I
#
+# Putting lists.
+#
# The instruction "put_list Const [] Dst" were generated in rare
# circumstances up to and including OTP 18. Starting with OTP 19,
# AFAIK, it should never be generated.
@@ -510,32 +601,26 @@ put_list Src Dst=x Dst => update_list Src Dst
update_list xyc x
-put_list x n x
-put_list y n x
-put_list x x x
-put_list y x x
-put_list y y x
-put_list x y x
-# put_list SrcReg Constant Dst
-put_list x c x
-put_list x c y
-put_list y c x
-# put_list Constant SrcReg Dst
-put_list c x x
-put_list c y x
+# put_list SrcReg1 SrcReg2 => Dst
+
+put_list xy xy x
+# put_list SrcReg [] => Dst
+put_list xy n xy
+# put_list SrcReg Constant => x
+put_list xy c x
+# put_list Constant SrcReg => Dst
+put_list c xy x
# The following put_list instructions using x(0) are frequently used.
-put_list r n r
-put_list r n x
-put_list r x x
-put_list r x r
+put_list r n rx
+put_list r x rx
put_list x x r
%cold
@@ -602,19 +687,36 @@ is_tuple f? rxy
test_arity Fail Literal=q Arity => move Literal x | test_arity Fail x Arity
test_arity Fail=f c Arity => jump Fail
+test_arity Fail Tuple=x Arity | get_tuple_element Tuple Pos Dst=x => \
+ test_arity_get_tuple_element Fail Tuple Arity Pos Dst
test_arity f? xy A
-get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \
+test_arity_get_tuple_element f? x A P x
+
+is_tuple NotTupleFail Tuple=x | is_tagged_tuple WrongRecordFail Tuple Arity Atom => \
+ is_tagged_tuple_ff NotTupleFail WrongRecordFail Tuple Arity Atom
+
+is_tagged_tuple_ff f? f? rx A a
+
+get_tuple_element Reg=x P1 D1=x | \
+ get_tuple_element Reg=x P2 D2=x | \
get_tuple_element Reg=x P3 D3=x | \
- succ(P1, P2) | succ(P2, P3) | \
- succ(D1, D2) | succ(D2, D3) => i_get_tuple_element3 Reg P1 D1
+ succ(P1, P2) | succ(P2, P3) | succ(D1, D2) | succ(D2, D3) | \
+ distinct(D1, Reg) | distinct(D2, Reg) | distinct(D3, Reg) => \
+ i_get_tuple_element3 Reg P1 D1
+
+get_tuple_element Reg=x P1 D1=x | \
+ get_tuple_element Reg=x P2 D2=x | \
+ succ(P1, P2) | succ(D1, D2) | \
+ distinct(D1, Reg) => \
+ i_get_tuple_element2 Reg P1 D1
get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \
- succ(P1, P2) | succ(D1, D2) => i_get_tuple_element2 Reg P1 D1
+ succ(P1, P2) | distinct(D1, Reg) => i_get_tuple_element2_dst Reg P1 D1 D2
get_tuple_element Reg=x P1 D1=y | get_tuple_element Reg=x P2 D2=y | \
- succ(P1, P2) => i_get_tuple_element2y Reg P1 D1 D2
+ succ(P1, P2) => i_get_tuple_element2_dst Reg P1 D1 D2
get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst
@@ -638,14 +740,21 @@ is_list f? y
is_nonempty_list Fail=f S=x | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs
-is_nonempty_list F=f x==0 | test_heap I1 I2 => is_nonempty_list_test_heap F I1 I2
-
is_nonempty_list Fail=f S=x | get_list S D1=x D2=x => \
is_nonempty_list_get_list Fail S D1 D2
+is_nonempty_list Fail=f S=x | get_hd S Dst=x => \
+ is_nonempty_list_get_hd Fail S Dst
+
+is_nonempty_list Fail=f S=x | get_tl S Dst=x => \
+ is_nonempty_list_get_tl Fail S Dst
+
is_nonempty_list_allocate f? rx t t
-is_nonempty_list_test_heap f? I t
+
is_nonempty_list_get_list f? rx x x
+is_nonempty_list_get_hd f? x x
+is_nonempty_list_get_tl f? x x
+
is_nonempty_list f? xy
is_atom f? x
@@ -710,11 +819,12 @@ is_boolean Fail=f ac => jump Fail
is_boolean f? xy
%hot
-is_function2 Fail=f Literal=q Arity | literal_is_export(Literal) =>
-is_function2 Fail=f c Arity => jump Fail
-is_function2 Fail=f Fun a => jump Fail
+is_function2 Fail=f Fun Arity => gen_is_function2(Fail, Fun, Arity)
-is_function2 f? S s
+%cold
+cold_is_function2 f? x x
+%hot
+hot_is_function2 f? S t
# Allocating & initializing.
allocate Need Regs | init Y => allocate_init Need Regs Y
@@ -946,10 +1056,9 @@ call_ext_only u==0 u$func:os:perf_counter/0 => \
call_ext u Bif=u$is_bif => call_bif Bif
-call_ext_last u Bif=u$is_bif D => call_bif Bif | deallocate_return D
+call_ext_last u Bif=u$is_bif D => deallocate D | call_bif_only Bif
-call_ext_only Ar=u Bif=u$is_bif => \
- allocate u Ar | call_bif Bif | deallocate_return u
+call_ext_only Ar=u Bif=u$is_bif => call_bif_only Bif
#
# Any remaining calls are calls to Erlang functions, not BIFs.
@@ -981,6 +1090,7 @@ i_perf_counter
%hot
call_bif e
+call_bif_only e
#
# Calls to non-building and guard BIFs.
@@ -989,14 +1099,18 @@ call_bif e
bif0 u$bif:erlang:self/0 Dst=d => self Dst
bif0 u$bif:erlang:node/0 Dst=d => node Dst
+bif1 Fail=f Bif=u$bif:erlang:hd/1 Src=x Dst=x => is_nonempty_list_get_hd Fail Src Dst
+bif1 Fail=f Bif=u$bif:erlang:tl/1 Src=x Dst=x => is_nonempty_list_get_tl Fail Src Dst
+
bif1 Fail Bif=u$bif:erlang:get/1 Src=s Dst=d => gen_get(Src, Dst)
bif2 Jump=j u$bif:erlang:element/2 S1=s S2=xy Dst=d => gen_element(Jump, S1, S2, Dst)
-bif1 p Bif S1 Dst => bif1_body Bif S1 Dst
+bif1 p Bif S1 Dst => i_bif1_body S1 Bif Dst
+bif1 Fail=f Bif S1 Dst => i_bif1 S1 Fail Bif Dst
-bif2 p Bif S1 S2 Dst => i_bif2_body Bif S1 S2 Dst
-bif2 Fail Bif S1 S2 Dst => i_bif2 Fail Bif S1 S2 Dst
+bif2 p Bif S1 S2 Dst => i_bif2_body S2 S1 Bif Dst
+bif2 Fail=f Bif S1 S2 Dst => i_bif2 S2 S1 Fail Bif Dst
i_get_hash c I d
i_get s d
@@ -1014,10 +1128,12 @@ i_fast_element xy j? I d
i_element xy j? s d
-bif1 f? b s d
-bif1_body b s d
-i_bif2 f? b s s d
-i_bif2_body b s s d
+i_bif1 s f? b d
+i_bif1_body s b d
+i_bif2 s s f? b d
+i_bif2_body s s b d
+i_bif3 s s s f? b d
+i_bif3_body s s s b d
#
# Internal calls.
@@ -1062,8 +1178,25 @@ call_fun Arity => i_call_fun Arity
i_call_fun t
i_call_fun_last t Q
+
+#
+# A fun with an empty environment can be converted to a literal.
+# As a further optimization, we try to move the fun to its
+# final destination directly.
+
make_fun2 OldIndex=u => gen_make_fun2(OldIndex)
+move_fun/2
+move_fun Fun X0 | move X0 Dst | move Src X0 => move Fun Dst | move Src X0
+move_fun Fun X0 | move A B | move X0 Dst | move Src X0 | \
+ independent_moves(Fun, X0, A, B) | distinct(Dst, A) => \
+ move Fun Dst | move A B | move Src X0
+move_fun Fun X0 | move X0 Dst | make_fun2 OldIndex | \
+    is_killed_by_make_fun(X0, OldIndex) => \
+ move Fun Dst | make_fun2 OldIndex
+
+move_fun Fun Dst => move Fun Dst
+
%cold
i_make_fun W t
%hot
@@ -1074,101 +1207,141 @@ is_function Fail=f c => jump Fail
func_info M F A => i_func_info u M F A
# ================================================================
-# New bit syntax matching (R11B).
+# Bit syntax matching obsoleted in OTP 22.
# ================================================================
-%warm
+%cold
bs_start_match2 Fail=f ica X Y D => jump Fail
bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D
-i_bs_start_match2 xy f t t x
+i_bs_start_match2 xy f t t d
+bs_save2 Y=y Index => move Y x | bs_save2 x Index
bs_save2 Reg Index => gen_bs_save(Reg, Index)
i_bs_save2 x t
+bs_restore2 Y=y Index => move Y x | bs_restore2 x Index
bs_restore2 Reg Index => gen_bs_restore(Reg, Index)
i_bs_restore2 x t
+bs_context_to_binary Y=y | line L | badmatch Y => \
+ move Y x | bs_context_to_binary x | line L | badmatch x
+bs_context_to_binary Y=y => move Y x | bs_context_to_binary x
+bs_context_to_binary x
+%warm
+
+# ================================================================
+# New bit syntax matching (R11B).
+# ================================================================
+
+%warm
+
# Matching integers
bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val
-i_bs_match_string x f W W
+i_bs_match_string xy f W W
# Fetching integers from binaries.
-bs_get_integer2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
+bs_get_integer2 Fail=f Ms=xy Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-i_bs_get_integer_small_imm x W f? t x
-i_bs_get_integer_imm x W t f? t x
-i_bs_get_integer f? t t x s x
-i_bs_get_integer_8 x f? x
-i_bs_get_integer_16 x f? x
+i_bs_get_integer_small_imm Ms Bits Fail Flags Y=y => \
+ i_bs_get_integer_small_imm Ms Bits Fail Flags x | move x Y
+
+i_bs_get_integer_imm Ms Bits Live Fail Flags Y=y => \
+ i_bs_get_integer_imm Ms Bits Live Fail Flags x | move x Y
+
+i_bs_get_integer_small_imm xy W f? t x
+i_bs_get_integer_imm xy W t f? t x
+i_bs_get_integer xy f? t t s d
+i_bs_get_integer_8 xy f? d
+i_bs_get_integer_16 xy f? d
%if ARCH_64
-i_bs_get_integer_32 x f? x
+i_bs_get_integer_32 xy f? d
%endif
# Fetching binaries from binaries.
-bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
+bs_get_binary2 Fail=f Ms=xy Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-i_bs_get_binary_imm2 f? x t W t x
-i_bs_get_binary2 f x t? s t x
-i_bs_get_binary_all2 f? x t t x
-i_bs_get_binary_all_reuse x f? t
+i_bs_get_binary_imm2 xy f? t W t d
+i_bs_get_binary2 xy f t? s t d
+i_bs_get_binary_all2 xy f? t t d
# Fetching float from binaries.
-bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
+bs_get_float2 Fail=f Ms=xy Live=u Sz=s Unit=u Flags=u Dst=d => \
gen_get_float2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
-i_bs_get_float2 f? x t s t x
+i_bs_get_float2 xy f? t s t d
# Miscellaneous
-bs_skip_bits2 Fail=f Ms=x Sz=sq Unit=u Flags=u => \
+bs_skip_bits2 Fail=f Ms=xy Sz=sq Unit=u Flags=u => \
gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
-i_bs_skip_bits_imm2 f? x W
-i_bs_skip_bits2 f? x xy t
-i_bs_skip_bits_all2 f? x t
+i_bs_skip_bits_imm2 f? xy W
+i_bs_skip_bits2 xy xy f? t
-bs_test_tail2 Fail=f Ms=x Bits=u==0 => bs_test_zero_tail2 Fail Ms
-bs_test_tail2 Fail=f Ms=x Bits=u => bs_test_tail_imm2 Fail Ms Bits
-bs_test_zero_tail2 f? x
-bs_test_tail_imm2 f? x W
+bs_test_tail2 Fail=f Ms=xy Bits=u==0 => bs_test_zero_tail2 Fail Ms
+bs_test_tail2 Fail=f Ms=xy Bits=u => bs_test_tail_imm2 Fail Ms Bits
+bs_test_zero_tail2 f? xy
+bs_test_tail_imm2 f? xy W
bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms
-bs_test_unit f? x t
-bs_test_unit8 f? x
+bs_test_unit f? xy t
+bs_test_unit8 f? xy
-# An y register operand for bs_context_to_binary is rare,
-# but can happen because of inlining.
+# Gets a bitstring from the tail of a context.
+bs_get_tail xy d t
-bs_context_to_binary Y=y | line L | badmatch Y => \
- move Y x | bs_context_to_binary x | line L | badmatch x
+# New bs_start_match variant for contexts with external position storage.
+#
+# bs_get/set_position is used to save positions into registers instead of
+# "slots" in the context itself, which lets us continue matching even after
+# we've passed it off to another function.
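
A minimal C sketch of the external-position idea, with hypothetical types
(the actual ERTS match context and register representation differ):

    /* The read position lives outside the match state, so a callee can
     * advance its own copy without clobbering the position the caller
     * has saved in a register. */
    struct match_state {
        const unsigned char *base;   /* start of the binary data */
        unsigned long size_bits;     /* total size in bits */
    };

    static int skip_bits(const struct match_state *ms,
                         unsigned long *pos, unsigned long n)
    {
        if (*pos + n > ms->size_bits)
            return 0;                /* match failure */
        *pos += n;                   /* caller keeps *pos in a register */
        return 1;
    }
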
-bs_context_to_binary Y=y => move Y x | bs_context_to_binary x
+%if ARCH_64
+bs_start_match3 Fail Bin Live Ctx | bs_get_position Ctx Pos=x Ignored => \
+ i_bs_start_match3_gp Bin Live Fail Ctx Pos
+i_bs_start_match3_gp xy t f d x
+%endif
-bs_context_to_binary x
+bs_start_match3 Fail=f ica Live Dst => jump Fail
+bs_start_match3 Fail Bin Live Dst => i_bs_start_match3 Bin Live Fail Dst
+
+i_bs_start_match3 xy t f d
+
+# Match context position instructions. 64-bit assumes that all positions can
+# fit into an unsigned small.
+
+%if ARCH_64
+ bs_get_position Src Dst Live => i_bs_get_position Src Dst
+ i_bs_get_position xy xy
+ bs_set_position xy xy
+%else
+ bs_get_position xy d t?
+ bs_set_position xy xy
+%endif
#
# Utf8/utf16/utf32 support. (R12B-5)
#
-bs_get_utf8 Fail=f Ms=x u u Dst=d => i_bs_get_utf8 Ms Fail Dst
-i_bs_get_utf8 x f? x
+bs_get_utf8 Fail=f Ms=xy u u Dst=d => i_bs_get_utf8 Ms Fail Dst
+i_bs_get_utf8 xy f? d
-bs_skip_utf8 Fail=f Ms=x u u => i_bs_get_utf8 Ms Fail x
+bs_skip_utf8 Fail=f Ms=xy u u => i_bs_get_utf8 Ms Fail x
-bs_get_utf16 Fail=f Ms=x u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst
-bs_skip_utf16 Fail=f Ms=x u Flags=u => i_bs_get_utf16 Ms Fail Flags x
+bs_get_utf16 Fail=f Ms=xy u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst
+bs_skip_utf16 Fail=f Ms=xy u Flags=u => i_bs_get_utf16 Ms Fail Flags x
-i_bs_get_utf16 x f? t x
+i_bs_get_utf16 xy f? t d
-bs_get_utf32 Fail=f Ms=x Live=u Flags=u Dst=d => \
+bs_get_utf32 Fail=f Ms=xy Live=u Flags=u Dst=d => \
bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \
i_bs_validate_unicode_retract Fail Dst Ms
-bs_skip_utf32 Fail=f Ms=x Live=u Flags=u => \
+bs_skip_utf32 Fail=f Ms=xy Live=u Flags=u => \
bs_get_integer2 Fail Ms Live i=32 u=1 Flags x | \
i_bs_validate_unicode_retract Fail x Ms
@@ -1182,6 +1355,9 @@ i_bs_validate_unicode_retract j s S
bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail
+bs_init2 Fail Sz Words Regs Flags Dst=y => \
+ bs_init2 Fail Sz Words Regs Flags x | move x Dst
+
bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst
bs_init2 Fail Sz=u Words Regs Flags Dst => \
@@ -1202,6 +1378,8 @@ i_bs_init_heap W I t? x
bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail
+bs_init_bits Fail Sz Words Regs Flags Dst=y => \
+ bs_init_bits Fail Sz Words Regs Flags x | move x Dst
bs_init_bits Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init_bits Sz Regs Dst
bs_init_bits Fail Sz=u Words Regs Flags Dst => i_bs_init_bits_heap Sz Words Regs Dst
@@ -1230,7 +1408,7 @@ bs_private_append Fail Size Unit Bin Flags Dst => \
bs_init_writable
-i_bs_append j? I t? t s x
+i_bs_append j? I t? t s xy
i_bs_private_append j? t s S x
#
@@ -1240,31 +1418,35 @@ i_bs_private_append j? t s S x
bs_put_integer Fail=j Sz=sq Unit=u Flags=u Src=s => \
gen_put_integer(Fail, Sz, Unit, Flags, Src)
-i_new_bs_put_integer j? s t s
-i_new_bs_put_integer_imm j? W t s
+i_new_bs_put_integer j? S t s
+i_new_bs_put_integer_imm xyc j? W t
#
# Utf8/utf16/utf32 support. (R12B-5)
#
-bs_utf8_size j Src=s Dst=d => i_bs_utf8_size Src Dst
-i_bs_utf8_size s x
-bs_utf16_size j Src=s Dst=d => i_bs_utf16_size Src Dst
-
-i_bs_utf16_size s x
-
-bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src
-i_bs_put_utf8 j? s
-bs_put_utf16 j? t s
-bs_put_utf32 Fail=j Flags=u Src=s => \
-    i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src
-i_bs_validate_unicode j? s
+bs_utf8_size j Src Dst=d => i_bs_utf8_size Src Dst
+bs_utf16_size j Src Dst=d => i_bs_utf16_size Src Dst
+bs_put_utf8 Fail u Src => i_bs_put_utf8 Fail Src
+bs_put_utf32 Fail=j Flags=u Src=s => \
+    i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src
+i_bs_utf8_size S x
+i_bs_utf16_size S x
+i_bs_put_utf8 j? S
+bs_put_utf16 j? t S
+i_bs_validate_unicode j? S
+# Handle unoptimized code.
+i_bs_utf8_size Src=c Dst => move Src x | i_bs_utf8_size x Dst
+i_bs_utf16_size Src=c Dst => move Src x | i_bs_utf16_size x Dst
+i_bs_put_utf8 Fail Src=c => move Src x | i_bs_put_utf8 Fail x
+bs_put_utf16 Fail Flags Src=c => move Src x | bs_put_utf16 Fail Flags x
+i_bs_validate_unicode Fail Src=c => move Src x | i_bs_validate_unicode Fail x
#
# Storing floats into binaries.
@@ -1274,7 +1456,7 @@ bs_put_float Fail Sz=q Unit Flags Val => badarg Fail
bs_put_float Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_float(Fail, Sz, Unit, Flags, Src)
-i_new_bs_put_float j? s t s
+i_new_bs_put_float j? S t s
i_new_bs_put_float_imm j? W t s
#
@@ -1284,9 +1466,18 @@ i_new_bs_put_float_imm j? W t s
bs_put_binary Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_binary(Fail, Sz, Unit, Flags, Src)
-i_new_bs_put_binary j? s t s
-i_new_bs_put_binary_imm j? W s
-i_new_bs_put_binary_all j? s t
+# In unoptimized code, the binary argument could be a literal. (In optimized code,
+# there would be a bs_put_string instruction.)
+i_new_bs_put_binary Fail Size Unit Lit=c => \
+ move Lit x | i_new_bs_put_binary Fail Size Unit x
+i_new_bs_put_binary_imm Fail Size Lit=c => \
+ move Lit x | i_new_bs_put_binary_imm Fail Size x
+i_new_bs_put_binary_all Lit=c Fail Unit => \
+ move Lit x | i_new_bs_put_binary_all x Fail Unit
+
+i_new_bs_put_binary j? S t S
+i_new_bs_put_binary_imm j? W S
+i_new_bs_put_binary_all xy j? t
#
# Warning: The i_bs_put_string and i_new_bs_put_string instructions
@@ -1384,23 +1575,22 @@ put_map_exact F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \
sorted_put_map_assoc Map Dst Live Size Rest=* | is_empty_map(Map) => \
new_map Dst Live Size Rest
-sorted_put_map_assoc Src=s Dst Live Size Rest=* => \
- update_map_assoc Src Dst Live Size Rest
-sorted_put_map_assoc Src Dst Live Size Rest=* => \
- move Src x | update_map_assoc x Dst Live Size Rest
+sorted_put_map_assoc Src=xyc Dst Live Size Rest=* => \
+ update_map_assoc Src Dst Live Size Rest
-sorted_put_map_exact F Src=s Dst Live Size Rest=* => \
- update_map_exact F Src Dst Live Size Rest
-sorted_put_map_exact F Src Dst Live Size Rest=* => \
- move Src x | update_map_exact F x Dst Live Size Rest
+sorted_put_map_exact Fail Src=xy Dst Live Size Rest=* => \
+ update_map_exact Src Fail Dst Live Size Rest
+# Literal map arguments for an exact update operation are extremely rare.
+sorted_put_map_exact Fail Src Dst Live Size Rest=* => \
+ move Src x | update_map_exact x Fail Dst Live Size Rest
new_map Dst Live Size Rest=* | is_small_map_literal_keys(Size, Rest) => \
gen_new_small_map_lit(Dst, Live, Size, Rest)
new_map d t I
i_new_small_map_lit d t q
-update_map_assoc s d t I
-update_map_exact j? s d t I
+update_map_assoc xyc d t I
+update_map_exact xy j? d t I
is_map Fail Lit=q | literal_is_map(Lit) =>
is_map Fail cq => jump Fail
@@ -1447,80 +1637,93 @@ gc_bif2 Fail Live u$bif:erlang:sminus/2 S1 S2 Dst => \
#
# Optimize addition and subtraction of small literals using
-# the i_increment/4 instruction (in bodies, not in guards).
+# the i_increment/3 instruction (in bodies, not in guards).
#
gen_plus p Live Int=i Reg=d Dst => \
- gen_increment(Reg, Int, Live, Dst)
+ gen_increment(Reg, Int, Dst)
gen_plus p Live Reg=d Int=i Dst => \
- gen_increment(Reg, Int, Live, Dst)
+ gen_increment(Reg, Int, Dst)
gen_minus p Live Reg=d Int=i Dst | negation_is_small(Int) => \
- gen_increment_from_minus(Reg, Int, Live, Dst)
+ gen_increment_from_minus(Reg, Int, Dst)
#
-# GCing arithmetic instructions.
+# Arithmetic instructions.
#
-gen_plus Fail Live S1 S2 Dst => i_plus S1 S2 Fail Live Dst
+# It is OK to swap arguments for '+' in a guard. It is also
+# OK to turn minus into plus in a guard.
+gen_plus Fail=f Live S1=c S2 Dst => i_plus S2 S1 Fail Dst
+gen_minus Fail=f Live S1 S2=i Dst => gen_plus_from_minus(Fail, Live, S1, S2, Dst)
+
+gen_plus Fail Live S1 S2 Dst => i_plus S1 S2 Fail Dst
-gen_minus Fail Live S1 S2 Dst => i_minus S1 S2 Fail Live Dst
+gen_minus Fail Live S1 S2 Dst => i_minus S1 S2 Fail Dst
gc_bif2 Fail Live u$bif:erlang:stimes/2 S1 S2 Dst => \
- i_times Fail Live S1 S2 Dst
+ i_times Fail S1 S2 Dst
gc_bif2 Fail Live u$bif:erlang:div/2 S1 S2 Dst => \
- i_m_div Fail Live S1 S2 Dst
+ i_m_div Fail S1 S2 Dst
gc_bif2 Fail Live u$bif:erlang:intdiv/2 S1 S2 Dst => \
- i_int_div Fail Live S1 S2 Dst
+ i_int_div Fail S1 S2 Dst
gc_bif2 Fail Live u$bif:erlang:rem/2 S1 S2 Dst => \
- i_rem S1 S2 Fail Live Dst
+ i_rem S1 S2 Fail Dst
gc_bif2 Fail Live u$bif:erlang:bsl/2 S1 S2 Dst => \
- i_bsl S1 S2 Fail Live Dst
+ i_bsl S1 S2 Fail Dst
gc_bif2 Fail Live u$bif:erlang:bsr/2 S1 S2 Dst => \
- i_bsr S1 S2 Fail Live Dst
+ i_bsr S1 S2 Fail Dst
gc_bif2 Fail Live u$bif:erlang:band/2 S1 S2 Dst => \
- i_band S1 S2 Fail Live Dst
+ i_band S1 S2 Fail Dst
gc_bif2 Fail Live u$bif:erlang:bor/2 S1 S2 Dst => \
- i_bor Fail Live S1 S2 Dst
+ i_bor Fail S1 S2 Dst
gc_bif2 Fail Live u$bif:erlang:bxor/2 S1 S2 Dst => \
- i_bxor Fail Live S1 S2 Dst
+ i_bxor Fail S1 S2 Dst
-gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst
+gc_bif1 Fail Live u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src Dst
-i_increment rxy W t d
-i_plus x xy j? t d
-i_plus s s j? t d
-i_minus x x j? t d
-i_minus s s j? t d
-i_times j? t s s d
-i_m_div j? t s s d
-i_int_div j? t s s d
-i_rem x x j? t d
-i_rem s s j? t d
-i_bsl s s j? t d
-i_bsr s s j? t d
-i_band x c j? t d
-i_band s s j? t d
-i_bor j? I s s d
-i_bxor j? I s s d
-i_int_bnot Fail Src=c Live Dst => move Src x | i_int_bnot Fail x Live Dst
-i_int_bnot j? S t d
+i_increment rxy W d
+# Handle unoptimized code.
+i_plus S1=c S2=c Fail Dst => move S1 x | i_plus x S2 Fail Dst
+i_plus xy xyc j? d
+# A minus instruction with a constant right operand will be
+# converted to an i_increment instruction, except in guards or
+# when the negated value of the constant won't fit in a small.
+# Therefore, it is very rare.
+i_minus S1 S2=c Fail Dst => move S2 x | i_minus S1 x Fail Dst
+i_minus xy xy j? d
+i_minus c xy j? d
+i_times j? s s d
+i_m_div j? s s d
+i_int_div j? s s d
+i_rem x x j? d
+i_rem s s j? d
+i_bsl s s j? d
+i_bsr s s j? d
+i_band x c j? d
+i_band s s j? d
+i_bor j? s s d
+i_bxor j? s s d
+
+i_int_bnot Fail Src=c Dst => move Src x | i_int_bnot Fail x Dst
+
+i_int_bnot j? S d
#
# Old guard BIFs that create heap fragments are no longer allowed.
@@ -1533,29 +1736,28 @@ bif1 Fail u$bif:erlang:round/1 s d => too_old_compiler
bif1 Fail u$bif:erlang:trunc/1 s d => too_old_compiler
#
-# Guard BIFs.
+# Handle the length/1 guard BIF specially to make it trappable.
#
-gc_bif1 Fail I Bif Src Dst => \
-    gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif2 Fail I Bif S1 S2 Dst => \
-    gen_guard_bif2(Fail, I, Bif, S1, S2, Dst)
-gc_bif3 Fail I Bif S1 S2 S3 Dst => \
-    gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst)
-i_gc_bif1 j? W s t? d
-i_gc_bif2 j? W t? s s d
-ii_gc_bif3/7
-# A specific instruction can only have 6 operands, so we must
-# pass one of the arguments in an x register.
-ii_gc_bif3 Fail Bif Live S1 S2 S3 Dst => \
-    move S1 x | i_gc_bif3 Fail Bif Live S2 S3 Dst
-i_gc_bif3 j? W t? s s d
+gc_bif1 Fail=j Live u$bif:erlang:length/1 Src Dst => \
+    i_length_setup Live Src | i_length Fail Live Dst
+i_length_setup Live Src=c => move Src x | i_length_setup Live x
+i_length_setup t xy
+i_length j? t d
+#
+# Guard BIFs.
+#
+gc_bif1 p Live Bif Src Dst => i_bif1_body Src Bif Dst
+gc_bif1 Fail=f Live Bif Src Dst => i_bif1 Src Fail Bif Dst
+gc_bif2 p Live Bif S1 S2 Dst => i_bif2_body S2 S1 Bif Dst
+gc_bif2 Fail=f Live Bif S1 S2 Dst => i_bif2 S2 S1 Fail Bif Dst
+gc_bif3 p Live Bif S1 S2 S3 Dst => i_bif3_body S3 S2 S1 Bif Dst
+gc_bif3 Fail=f Live Bif S1 S2 S3 Dst => i_bif3 S3 S2 S1 Fail Bif Dst
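
What "trappable" means for length/1 above, as a minimal C sketch with
hypothetical names (the real i_length instruction walks Erlang list terms
and charges the scheduler's reduction counter):

    /* Measure the list in bounded slices. When the budget runs out the
     * caller yields (traps) back to the scheduler and resumes later from
     * the saved position, so a very long list cannot monopolize a
     * scheduler thread. */
    struct cons { struct cons *tail; };

    enum length_step_result { LENGTH_DONE, LENGTH_YIELD };

    static enum length_step_result
    length_step(struct cons **pos, unsigned long *acc, unsigned long budget)
    {
        while (*pos != NULL) {
            if (budget-- == 0)
                return LENGTH_YIELD;   /* trap; resume from *pos later */
            ++*acc;
            *pos = (*pos)->tail;
        }
        return LENGTH_DONE;            /* *acc now holds the length */
    }
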
#
# The following instruction is specially handled in beam_load.c
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index a69da4d762..c261c8e117 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -63,6 +63,14 @@
# endif
#endif
+#ifndef ERTS_NOINLINE
+# if ERTS_AT_LEAST_GCC_VSN__(3,1,1)
+# define ERTS_NOINLINE __attribute__((__noinline__))
+# else
+# define ERTS_NOINLINE
+# endif
+#endif
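
An assumed usage example (not from this diff): the attribute keeps a cold
path from being inlined into its caller's fast path.

    /* With GCC >= 3.1.1 (and compatible compilers) ERTS_NOINLINE expands
     * to __attribute__((__noinline__)); elsewhere it expands to nothing
     * and inlining is left to the compiler. */
    static ERTS_NOINLINE void cold_error_path(void)
    {
        /* rarely executed; keeping it out of line keeps callers small */
    }
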
+
#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
# undef ERTS_CAN_INLINE
# define ERTS_CAN_INLINE 0
@@ -111,6 +119,23 @@
#endif
#endif
+/*
+ * Test for clang's convenient __has_builtin feature checking macro.
+ */
+#ifndef __has_builtin
+ #define __has_builtin(x) 0
+#endif
+
+/*
+ * Define HAVE_OVERFLOW_CHECK_BUILTINS if the overflow checking arithmetic
+ * builtins are available.
+ */
+#if ERTS_AT_LEAST_GCC_VSN__(5, 1, 0)
+# define HAVE_OVERFLOW_CHECK_BUILTINS 1
+#elif __has_builtin(__builtin_mul_overflow)
+# define HAVE_OVERFLOW_CHECK_BUILTINS 1
+#endif
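
A minimal sketch of how the guarded builtins are typically used; the helper
name is hypothetical, but __builtin_mul_overflow itself is the GCC/clang
builtin probed for above:

    /* Returns 1 and stores the product when a*b fits in a long;
     * returns 0 on overflow so the caller can fall back to a slower
     * arbitrary-precision (bignum) path. */
    static int mul_small_checked(long a, long b, long *res)
    {
    #ifdef HAVE_OVERFLOW_CHECK_BUILTINS
        long tmp;
        if (__builtin_mul_overflow(a, b, &tmp))
            return 0;               /* would not fit in a long */
        *res = tmp;
        return 1;
    #else
        (void)a; (void)b; (void)res;
        return 0;                   /* no builtins: always take the slow path */
    #endif
    }
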
+
#include "erl_misc_utils.h"
/*
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index a3069e419a..9eb020d070 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -316,7 +316,7 @@ struct ErtsTimerWheel_ {
#define ERTS_TW_SLOT_AT_ONCE (-1)
#define ERTS_TW_BUMP_LATER_WHEEL(TIW) \
- ((tiw)->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE >= (TIW)->later.pos)
+ ((TIW)->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE >= (TIW)->later.pos)
static int bump_later_wheel(ErtsTimerWheel *tiw, int *yield_count_p);
@@ -701,7 +701,8 @@ remove_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
if (empty_slot
&& tiw->true_next_timeout_time
- && p->timeout_pos == tiw->next_timeout_pos) {
+ && p->timeout_pos == tiw->next_timeout_pos
+ && tiw->yield_slot == ERTS_TW_SLOT_INACTIVE) {
tiw->true_next_timeout_time = 0;
}
if (--tiw->soon.nto == 0)
@@ -714,7 +715,8 @@ remove_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
ErtsMonotonicTime tpos = tiw->later.min_tpos;
tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
- if (tpos == tiw->next_timeout_pos)
+ if (tpos == tiw->next_timeout_pos
+ && tiw->yield_slot == ERTS_TW_SLOT_INACTIVE)
tiw->true_next_timeout_time = 0;
}
if (--tiw->later.nto == 0) {
@@ -908,7 +910,6 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
ErtsMonotonicTime tmp_slots = bump_to - tiw->pos;
- tmp_slots = (bump_to - tiw->pos);
if (tmp_slots < ERTS_TW_SOON_WHEEL_SIZE)
slots = (int) tmp_slots;
else
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index c5deed38ad..0bbae65e28 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1069,11 +1069,11 @@ do { \
#define HCONST 0x9e3779b9UL /* the golden ratio; an arbitrary value */
-Uint32
-block_hash(byte *k, unsigned length, Uint32 initval)
+static Uint32
+block_hash(byte *k, Uint length, Uint32 initval)
{
Uint32 a,b,c;
- unsigned len;
+ Uint len;
/* Set up the internal state */
len = length;
@@ -1569,7 +1569,7 @@ make_hash2(Eterm term)
* MUST BE USED AS INPUT FOR THE HASH. Two different terms must always have a
 * chance of hashing differently when salted: hash([Salt|A]) vs hash([Salt|B]).
*
- * This is why we can not use cached hash values for atoms for example.
+ * This is why we cannot use cached hash values for atoms for example.
*
*/
@@ -1749,7 +1749,7 @@ make_internal_hash(Eterm term, Uint32 salt)
case SUB_BINARY_SUBTAG:
{
byte* bptr;
- unsigned sz = binary_size(term);
+ Uint sz = binary_size(term);
Uint32 con = HCONST_13 + hash;
Uint bitoffs;
Uint bitsize;
@@ -2701,7 +2701,8 @@ Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only)
if((AN)->sysname != (BN)->sysname) \
RETURN_NEQ(erts_cmp_atoms((AN)->sysname, (BN)->sysname)); \
ASSERT((AN)->creation != (BN)->creation); \
- RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \
+ if ((AN)->creation != 0 && (BN)->creation != 0) \
+ RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \
} \
} while (0)
@@ -3486,7 +3487,7 @@ store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
if (is_external_header(*from_hp)) {
ExternalThing *etp = (ExternalThing *) from_hp;
ASSERT(is_external(ns));
- erts_refc_inc(&etp->node->refc, 2);
+ erts_ref_node_entry(etp->node, 2, make_boxed(to_hp));
}
else if (is_ordinary_ref_thing(from_hp))
return make_internal_ref(to_hp);